author     Alexandre Oliva <lxoliva@fsfla.org>  2010-08-12 07:09:07 +0000
committer  Alexandre Oliva <lxoliva@fsfla.org>  2010-08-12 07:09:07 +0000
commit     387d8a65bd544c461e947b0cd0eaa335c049d893 (patch)
tree       aa2b841b80cd64a13759dfd704f6e29d58982401
parent     49cd46fe0de65217b9401ca38b1da8134b0357b1 (diff)
download   linux-libre-raptor-387d8a65bd544c461e947b0cd0eaa335c049d893.tar.gz
           linux-libre-raptor-387d8a65bd544c461e947b0cd0eaa335c049d893.zip
2.6.32.18-159.fc12
-rw-r--r--  freed-ora/current/F-12/.cvsignore  12
-rw-r--r--  freed-ora/current/F-12/.gitignore  6
-rw-r--r--  freed-ora/current/F-12/CVS/Entries  188
-rw-r--r--  freed-ora/current/F-12/CVS/Repository  1
-rw-r--r--  freed-ora/current/F-12/CVS/Root  1
-rw-r--r--  freed-ora/current/F-12/CVS/Tag  1
-rw-r--r--  freed-ora/current/F-12/Makefile  31
-rw-r--r--  freed-ora/current/F-12/Makefile.config  105
-rw-r--r--  freed-ora/current/F-12/README.txt  67
-rw-r--r--  freed-ora/current/F-12/acpi-ec-add-delay-before-write.patch  52
-rw-r--r--  freed-ora/current/F-12/add-appleir-usb-driver.patch  635
-rw-r--r--  freed-ora/current/F-12/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch  42
-rw-r--r--  freed-ora/current/F-12/config-arm  102
-rw-r--r--  freed-ora/current/F-12/config-debug  64
-rw-r--r--  freed-ora/current/F-12/config-i686-PAE  37
-rw-r--r--  freed-ora/current/F-12/config-ia64-generic  206
-rw-r--r--  freed-ora/current/F-12/config-nodebug  64
-rw-r--r--  freed-ora/current/F-12/config-powerpc-generic  326
-rw-r--r--  freed-ora/current/F-12/config-powerpc32-generic  184
-rw-r--r--  freed-ora/current/F-12/config-powerpc32-smp  4
-rw-r--r--  freed-ora/current/F-12/config-powerpc64  176
-rw-r--r--  freed-ora/current/F-12/config-rhel-generic  204
-rw-r--r--  freed-ora/current/F-12/config-s390x  227
-rw-r--r--  freed-ora/current/F-12/config-sparc64-generic  196
-rw-r--r--  freed-ora/current/F-12/config-x86-generic  476
-rw-r--r--  freed-ora/current/F-12/config-x86_64-generic  387
-rw-r--r--  freed-ora/current/F-12/crypto-add-async-hash-testing.patch  111
-rw-r--r--  freed-ora/current/F-12/crypto-testmgr-add-null-test-for-aesni.patch  138
-rw-r--r--  freed-ora/current/F-12/crystalhd-2.6.34-staging.patch  8287
-rw-r--r--  freed-ora/current/F-12/die-floppy-die.patch  18
-rw-r--r--  freed-ora/current/F-12/drm-i915-add-reclaimable-to-page-allocations.patch  48
-rw-r--r--  freed-ora/current/F-12/drm-i915-fix-hibernate-memory-corruption.patch  41
-rw-r--r--  freed-ora/current/F-12/drm-i915-resume-force-mode.patch  50
-rw-r--r--  freed-ora/current/F-12/drm-intel-945gm-stability-fixes.patch  117
-rw-r--r--  freed-ora/current/F-12/drm-intel-acpi-populate-didl.patch  70
-rw-r--r--  freed-ora/current/F-12/drm-intel-big-hammer.patch  16
-rw-r--r--  freed-ora/current/F-12/drm-intel-make-lvds-work.patch  19
-rw-r--r--  freed-ora/current/F-12/drm-intel-next.patch  1
-rw-r--r--  freed-ora/current/F-12/drm-intel-no-tv-hotplug.patch  11
-rw-r--r--  freed-ora/current/F-12/drm-next.patch  30739
-rw-r--r--  freed-ora/current/F-12/drm-nouveau-d620.patch  121
-rw-r--r--  freed-ora/current/F-12/drm-nouveau-kconfig.patch  11
-rw-r--r--  freed-ora/current/F-12/drm-nouveau-mutex.patch  56
-rw-r--r--  freed-ora/current/F-12/drm-nouveau-nva3-noaccel.patch  105
-rw-r--r--  freed-ora/current/F-12/drm-nouveau-safetile-getparam.patch  26
-rw-r--r--  freed-ora/current/F-12/drm-nouveau-tvout-disable.patch  57
-rw-r--r--  freed-ora/current/F-12/drm-nouveau-update.patch  306
-rw-r--r--  freed-ora/current/F-12/drm-radeon-pm.patch  586
-rw-r--r--  freed-ora/current/F-12/drm-upgrayed-fixes.patch  1092
-rw-r--r--  freed-ora/current/F-12/drm-upgrayedd.patch  4
-rw-r--r--  freed-ora/current/F-12/ext4-fix-freeze-deadlock-under-io.patch  49
-rw-r--r--  freed-ora/current/F-12/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch  34
-rwxr-xr-x  freed-ora/current/F-12/find-provides  44
-rw-r--r--  freed-ora/current/F-12/fix-abrtd.patch  774
-rw-r--r--  freed-ora/current/F-12/fix-ima-null-ptr-deref.patch  54
-rw-r--r--  freed-ora/current/F-12/genkey  7
-rw-r--r--  freed-ora/current/F-12/git-bluetooth.patch  3344
-rw-r--r--  freed-ora/current/F-12/git-cpufreq.patch  0
-rw-r--r--  freed-ora/current/F-12/git-linus.diff  0
-rw-r--r--  freed-ora/current/F-12/hda_intel-prealloc-4mb-dmabuffer.patch  35
-rw-r--r--  freed-ora/current/F-12/hdpvr-ir-enable.patch  213
-rw-r--r--  freed-ora/current/F-12/hid-01-usbhid-initialize-interface-pointers-early-enough.patch  40
-rw-r--r--  freed-ora/current/F-12/hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch  53
-rw-r--r--  freed-ora/current/F-12/ice1712-fix-revo71-mixer-names.patch  42
-rw-r--r--  freed-ora/current/F-12/inotify-fix-inotify-oneshot-support.patch  25
-rw-r--r--  freed-ora/current/F-12/inotify-send-IN_UNMOUNT-events.patch  29
-rw-r--r--  freed-ora/current/F-12/iwlwifi-fix-internal-scan-race.patch  123
-rw-r--r--  freed-ora/current/F-12/iwlwifi-fix-scan-races.patch  139
-rw-r--r--  freed-ora/current/F-12/iwlwifi-manage-QoS-by-mac-stack.patch  100
-rw-r--r--  freed-ora/current/F-12/iwlwifi-recover_from_tx_stall.patch  13
-rw-r--r--  freed-ora/current/F-12/iwlwifi-reset-card-during-probe.patch  167
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch  38
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch  82
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-Recover-TX-flow-failure.patch  137
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch  374
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-add-internal-short-scan-support-for-3945.patch  85
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-code-cleanup-for-connectivity-recovery.patch  230
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch  146
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-move-plcp-check-to-separated-function.patch  179
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-multiple-force-reset-mode.patch  152
-rw-r--r--  freed-ora/current/F-12/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch  120
-rw-r--r--  freed-ora/current/F-12/kernel.spec  173
-rw-r--r--  freed-ora/current/F-12/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch  49
-rw-r--r--  freed-ora/current/F-12/l2tp-fix-oops-in-pppol2tp_xmit.patch  78
-rw-r--r--  freed-ora/current/F-12/linux-2.6-acpi-video-dos.patch  17
-rw-r--r--  freed-ora/current/F-12/linux-2.6-ata-quirk.patch  58
-rw-r--r--  freed-ora/current/F-12/linux-2.6-autoload-wmi.patch  244
-rw-r--r--  freed-ora/current/F-12/linux-2.6-b43_-Rewrite-DMA-Tx-status-handling-sanity-checks.patch  182
-rw-r--r--  freed-ora/current/F-12/linux-2.6-block-silently-error-unsupported-empty-barriers-too.patch  48
-rw-r--r--  freed-ora/current/F-12/linux-2.6-btrfs-fix-acl.patch  25
-rw-r--r--  freed-ora/current/F-12/linux-2.6-build-nonintconfig.patch  128
-rw-r--r--  freed-ora/current/F-12/linux-2.6-cantiga-iommu-gfx.patch  26
-rw-r--r--  freed-ora/current/F-12/linux-2.6-compile-fixes.patch  6
-rw-r--r--  freed-ora/current/F-12/linux-2.6-crash-driver.patch  363
-rw-r--r--  freed-ora/current/F-12/linux-2.6-debug-always-inline-kzalloc.patch  25
-rw-r--r--  freed-ora/current/F-12/linux-2.6-debug-nmi-timeout.patch  45
-rw-r--r--  freed-ora/current/F-12/linux-2.6-debug-sizeof-structs.patch  31
-rw-r--r--  freed-ora/current/F-12/linux-2.6-debug-taint-vm.patch  65
-rw-r--r--  freed-ora/current/F-12/linux-2.6-debug-vm-would-have-oomkilled.patch  52
-rw-r--r--  freed-ora/current/F-12/linux-2.6-defaults-acpi-video.patch  13
-rw-r--r--  freed-ora/current/F-12/linux-2.6-defaults-alsa-hda-beep-off.patch  13
-rw-r--r--  freed-ora/current/F-12/linux-2.6-defaults-aspm.patch  12
-rw-r--r--  freed-ora/current/F-12/linux-2.6-defaults-pci_no_msi.patch  92
-rw-r--r--  freed-ora/current/F-12/linux-2.6-defaults-pciehp.patch  13
-rw-r--r--  freed-ora/current/F-12/linux-2.6-dell-laptop-rfkill-fix.patch  323
-rw-r--r--  freed-ora/current/F-12/linux-2.6-driver-level-usb-autosuspend.diff  62
-rw-r--r--  freed-ora/current/F-12/linux-2.6-e1000-ich9.patch  27
-rw-r--r--  freed-ora/current/F-12/linux-2.6-enable-btusb-autosuspend.patch  18
-rw-r--r--  freed-ora/current/F-12/linux-2.6-execshield.patch  987
-rw-r--r--  freed-ora/current/F-12/linux-2.6-firewire-git-pending.patch  4
-rw-r--r--  freed-ora/current/F-12/linux-2.6-firewire-git-update.patch  3682
-rw-r--r--  freed-ora/current/F-12/linux-2.6-g5-therm-shutdown.patch  70
-rw-r--r--  freed-ora/current/F-12/linux-2.6-hotfixes.patch  1
-rw-r--r--  freed-ora/current/F-12/linux-2.6-imac-transparent-bridge.patch  15
-rw-r--r--  freed-ora/current/F-12/linux-2.6-input-fix-toshiba-hotkeys.patch  278
-rw-r--r--  freed-ora/current/F-12/linux-2.6-input-hid-quirk-egalax.patch  41
-rw-r--r--  freed-ora/current/F-12/linux-2.6-input-kill-stupid-messages.patch  17
-rw-r--r--  freed-ora/current/F-12/linux-2.6-ksm-kvm.patch  314
-rw-r--r--  freed-ora/current/F-12/linux-2.6-makefile-after_link.patch  57
-rw-r--r--  freed-ora/current/F-12/linux-2.6-nfs4-callback-hidden.patch  20
-rw-r--r--  freed-ora/current/F-12/linux-2.6-nfsd4-proots.patch  226
-rw-r--r--  freed-ora/current/F-12/linux-2.6-pci-cacheline-sizing.patch  41
-rw-r--r--  freed-ora/current/F-12/linux-2.6-pciehp-update.patch  147
-rw-r--r--  freed-ora/current/F-12/linux-2.6-phylib-autoload.patch  406
-rw-r--r--  freed-ora/current/F-12/linux-2.6-ps3-storage-alias.patch  7
-rw-r--r--  freed-ora/current/F-12/linux-2.6-revert-dvb-net-kabi-change.patch  149
-rw-r--r--  freed-ora/current/F-12/linux-2.6-rfkill-all.patch  52
-rw-r--r--  freed-ora/current/F-12/linux-2.6-selinux-mprotect-checks.patch  41
-rw-r--r--  freed-ora/current/F-12/linux-2.6-serial-460800.patch  70
-rw-r--r--  freed-ora/current/F-12/linux-2.6-silence-acpi-blacklist.patch  25
-rw-r--r--  freed-ora/current/F-12/linux-2.6-silence-fbcon-logo.patch  42
-rw-r--r--  freed-ora/current/F-12/linux-2.6-silence-noise.patch  66
-rw-r--r--  freed-ora/current/F-12/linux-2.6-sparc-selinux-mprotect-checks.patch  21
-rw-r--r--  freed-ora/current/F-12/linux-2.6-tracehook.patch  368
-rw-r--r--  freed-ora/current/F-12/linux-2.6-upstream-reverts.patch  2436
-rw-r--r--  freed-ora/current/F-12/linux-2.6-usb-uvc-autosuspend.diff  19
-rw-r--r--  freed-ora/current/F-12/linux-2.6-usb-wwan-update.patch  1634
-rw-r--r--  freed-ora/current/F-12/linux-2.6-utrace-ptrace.patch  1825
-rw-r--r--  freed-ora/current/F-12/linux-2.6-utrace.patch  4163
-rw-r--r--  freed-ora/current/F-12/linux-2.6-v4l-dvb-add-kworld-a340-support.patch  151
-rw-r--r--  freed-ora/current/F-12/linux-2.6-v4l-dvb-add-lgdt3304-support.patch  350
-rw-r--r--  freed-ora/current/F-12/linux-2.6-v4l-dvb-update.patch  366
-rw-r--r--  freed-ora/current/F-12/linux-2.6-vio-modalias.patch  32
-rw-r--r--  freed-ora/current/F-12/linux-2.6-x86-64-fbdev-primary.patch  49
-rw-r--r--  freed-ora/current/F-12/linux-2.6.29-sparc-IOC_TYPECHECK.patch  21
-rw-r--r--  freed-ora/current/F-12/linux-2.6.30-hush-rom-warning.patch  27
-rw-r--r--  freed-ora/current/F-12/linux-2.6.30-no-pcspkr-modalias.patch  11
-rw-r--r--  freed-ora/current/F-12/mac80211-explicitly-disable-enable-QoS.patch  358
-rwxr-xr-x  freed-ora/current/F-12/merge.pl  66
-rw-r--r--  freed-ora/current/F-12/mirrors  6
-rw-r--r--  freed-ora/current/F-12/patch-libre-2.6.32.16.bz2.sign  7
-rw-r--r--  freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta  bin  432 -> 0 bytes
-rw-r--r--  freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta.sign  7
-rw-r--r--  freed-ora/current/F-12/patch-libre-2.6.32.18.bz2.sign  7
-rw-r--r--  freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta  bin  0 -> 338 bytes
-rw-r--r--  freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta.sign  7
-rw-r--r--  freed-ora/current/F-12/pci-acpi-disable-aspm-if-no-osc.patch  55
-rw-r--r--  freed-ora/current/F-12/pci-aspm-dont-enable-too-early.patch  50
-rw-r--r--  freed-ora/current/F-12/perf  12
-rw-r--r--  freed-ora/current/F-12/scripts/CVS/Entries  25
-rw-r--r--  freed-ora/current/F-12/scripts/CVS/Repository  1
-rw-r--r--  freed-ora/current/F-12/scripts/CVS/Root  1
-rw-r--r--  freed-ora/current/F-12/scripts/CVS/Tag  1
-rwxr-xr-x  freed-ora/current/F-12/scripts/bumpspecfile.py  71
-rwxr-xr-x  freed-ora/current/F-12/scripts/check-TODO.sh  27
-rwxr-xr-x  freed-ora/current/F-12/scripts/combine.sh  34
-rw-r--r--  freed-ora/current/F-12/scripts/configcommon.pl  82
-rw-r--r--  freed-ora/current/F-12/scripts/configdiff.pl  76
-rw-r--r--  freed-ora/current/F-12/scripts/cross-amd64.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-i586.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-i686.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-ia64.sh  2
-rw-r--r--  freed-ora/current/F-12/scripts/cross-iseries.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-ppc.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-ppc64.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-ppc8260.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-ppc8560.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-pseries.sh  3
-rw-r--r--  freed-ora/current/F-12/scripts/cross-s390.sh  2
-rw-r--r--  freed-ora/current/F-12/scripts/cross-s390x.sh  2
-rwxr-xr-x  freed-ora/current/F-12/scripts/get-snapshot.sh  35
-rwxr-xr-x  freed-ora/current/F-12/scripts/grab-logs.sh  16
-rwxr-xr-x  freed-ora/current/F-12/scripts/newpatch.sh  21
-rwxr-xr-x  freed-ora/current/F-12/scripts/pull-upstreams.sh  13
-rwxr-xr-x  freed-ora/current/F-12/scripts/rebase.sh  199
-rwxr-xr-x  freed-ora/current/F-12/scripts/reconfig.sh  26
-rw-r--r--  freed-ora/current/F-12/scripts/rediffall.pl  64
-rwxr-xr-x  freed-ora/current/F-12/scripts/sort-config  222
-rw-r--r--  freed-ora/current/F-12/sky2-optima-add-missing-write-bits.patch  46
-rw-r--r--  freed-ora/current/F-12/sky2-optima-add-register-definitions.patch  281
-rw-r--r--  freed-ora/current/F-12/sky2-optima-fix-tcp-offload.patch  33
-rw-r--r--  freed-ora/current/F-12/sky2-optima-print-chip-name.patch  27
-rw-r--r--  freed-ora/current/F-12/sky2-optima-support.patch  157
-rw-r--r--  freed-ora/current/F-12/sources  2
-rw-r--r--  freed-ora/current/F-12/ssb_check_for_sprom.patch  185
-rw-r--r--  freed-ora/current/F-12/thinkpad-acpi-add-x100e.patch  11
-rw-r--r--  freed-ora/current/F-12/thinkpad-acpi-fix-backlight.patch  56
-rw-r--r--  freed-ora/current/F-12/upstream  2
-rw-r--r--  freed-ora/current/F-12/upstream-key.gpg  1597
-rw-r--r--  freed-ora/current/F-12/via-hwmon-temp-sensor.patch  391
-rw-r--r--  freed-ora/current/F-12/viafb-neuter-device-table.patch  21
-rw-r--r--  freed-ora/current/F-12/wmi-check-find_guid-return-value-to-prevent-oops.patch  36
-rw-r--r--  freed-ora/current/F-12/wmi-survive-bios-with-duplicate-guids.patch  76
203 files changed, 44829 insertions, 32685 deletions
diff --git a/freed-ora/current/F-12/.cvsignore b/freed-ora/current/F-12/.cvsignore
deleted file mode 100644
index 1fc20c4b0..000000000
--- a/freed-ora/current/F-12/.cvsignore
+++ /dev/null
@@ -1,12 +0,0 @@
-clog
-.shared-srctree
-GNUmakefile
-kernel-2.6.*.config
-temp-*
-deblob-main
-deblob-2.6.32
-deblob-check
-freedo.patch
-kernel-2.6.32
-linux-2.6.32-libre1.*
-patch-libre-2.6.32.16.*
diff --git a/freed-ora/current/F-12/.gitignore b/freed-ora/current/F-12/.gitignore
new file mode 100644
index 000000000..bab15b175
--- /dev/null
+++ b/freed-ora/current/F-12/.gitignore
@@ -0,0 +1,6 @@
+.svn
+linux-*.tar.bz2
+patch-*.bz2
+clog
+*.rpm
+kernel-2.6.*/
diff --git a/freed-ora/current/F-12/CVS/Entries b/freed-ora/current/F-12/CVS/Entries
deleted file mode 100644
index 0a628fdc8..000000000
--- a/freed-ora/current/F-12/CVS/Entries
+++ /dev/null
@@ -1,188 +0,0 @@
-D/scripts////
-/linux-2.6.32.tar.bz2.sign/-1.1/Mon Jan 4 15:44:16 2010//Tkernel-2_6_32_2-1_fc12
-/patch-2.6.32.16.bz2.sign/-1.1/Tue Jul 6 08:24:24 2010//Tkernel-2_6_32_16-140_fc12
-/.cvsignore/1.1161/dummy timestamp from new-entry//Tkernel-2_6_32_16-154_fc12
-/Makefile/1.116/dummy timestamp from new-entry//Tkernel-2_6_32_16-154_fc12
-/Makefile.config/1.73/Tue Jan 5 02:21:39 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/TODO/1.67/Tue Jan 5 02:22:16 2010//Tkernel-2_6_32_16-154_fc12
-/acpi-ec-add-delay-before-write.patch/1.1/Wed Apr 21 14:30:16 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/add-appleir-usb-driver.patch/1.1/Wed Jan 20 15:50:30 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch/1.2/Tue Jun 15 19:10:16 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch/1.2/Sat Jul 24 05:04:22 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/config-arm/1.5/Tue Jan 5 02:22:16 2010//Tkernel-2_6_32_16-154_fc12
-/config-debug/1.32/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-generic/1.348/dummy timestamp from new-entry//Tkernel-2_6_32_16-154_fc12
-/config-i686-PAE/1.5/Mon Jul 26 12:31:15 2010//Tkernel-2_6_32_16-154_fc12
-/config-ia64-generic/1.28/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-nodebug/1.42/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-powerpc-generic/1.52/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-powerpc32-generic/1.37/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-powerpc32-smp/1.2/Tue Jan 5 02:21:39 2010//Tkernel-2_6_32_16-154_fc12
-/config-powerpc64/1.37/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-rhel-generic/1.18/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-s390x/1.20/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-sparc64-generic/1.30/Thu Jan 7 10:40:03 2010//Tkernel-2_6_32_16-154_fc12
-/config-x86-generic/1.96/Mon Jul 26 12:31:15 2010//Tkernel-2_6_32_16-154_fc12
-/config-x86_64-generic/1.102/Tue Apr 13 16:38:04 2010//Tkernel-2_6_32_16-154_fc12
-/crypto-add-async-hash-testing.patch/1.1/Wed Jul 21 12:19:01 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/crypto-testmgr-add-null-test-for-aesni.patch/1.1/Wed Jul 21 04:28:55 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/crystalhd-2.6.34-staging.patch/1.2/Wed Jan 13 07:03:57 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/die-floppy-die.patch/1.1/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-connection-cache.patch/1.2/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-i915-add-reclaimable-to-page-allocations.patch/1.1/Sat Jul 24 00:53:22 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-i915-fix-hibernate-memory-corruption.patch/1.1/Sat Jul 24 00:53:22 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-i915-resume-force-mode.patch/1.2/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-intel-945gm-stability-fixes.patch/1.2/Sat Jul 24 02:38:42 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-intel-acpi-populate-didl.patch/1.1/Thu Apr 1 15:06:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-intel-big-hammer.patch/1.3/Mon Jan 4 15:44:10 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-intel-make-lvds-work.patch/1.1/Thu Apr 1 15:06:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-intel-next.patch/1.11/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-intel-no-tv-hotplug.patch/1.3/Mon Jan 11 16:41:49 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-next.patch/1.20/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-nouveau-d620.patch/1.1/Mon Mar 29 00:59:00 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-nouveau-g80-ctxprog.patch/1.1/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-nouveau-kconfig.patch/1.2/Thu Feb 18 07:42:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-nouveau-mutex.patch/1.1/Wed Feb 17 05:14:04 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-nouveau-safetile-getparam.patch/1.2/Thu Feb 18 07:42:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-nouveau-tvout-disable.patch/1.2/Thu Feb 18 07:42:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-nouveau-update.patch/1.4/Sat Feb 27 15:29:34 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-radeon-pm.patch/1.2/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/drm-upgrayedd.patch/1.6/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/ethtool-fix-buffer-overflow.patch/1.1/Wed Jul 7 03:41:22 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/ext4-issue-discard-operation-before-releasing-blocks.patch/1.1/Tue Apr 27 12:37:52 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch/1.1/Fri Jul 23 18:10:50 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/find-provides/1.18/Tue Jan 5 02:21:39 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/fix-abrtd.patch/1.2/Mon Apr 5 22:41:01 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/fix-ima-null-ptr-deref.patch/1.1/Sun Feb 7 22:24:05 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/gen-patches/1.6/Tue Jan 5 02:22:18 2010//Tkernel-2_6_32_16-154_fc12
-/genkey/1.6/Tue Jan 5 02:21:39 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/git-bluetooth.patch/1.2/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/git-cpufreq.patch/1.6/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/git-linus.diff/1.17/Tue Jan 5 02:21:39 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/hda_intel-prealloc-4mb-dmabuffer.patch/1.1/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/hdpvr-ir-enable.patch/1.8/Thu Jul 8 06:49:50 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/ice1712-fix-revo71-mixer-names.patch/1.1/Thu Feb 18 20:13:23 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/inotify-fix-inotify-oneshot-support.patch/1.2/Wed Jul 21 04:28:55 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/inotify-send-IN_UNMOUNT-events.patch/1.1/Fri Jul 16 14:38:15 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch/1.1/Tue Jun 15 18:40:32 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi-fix-internal-scan-race.patch/1.1/Tue May 25 17:35:08 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi-fix-scan-races.patch/1.1/Tue May 25 17:35:08 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi-manage-QoS-by-mac-stack.patch/1.1/Sun Jun 13 12:30:23 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi-recover_from_tx_stall.patch/1.1/Tue May 25 17:35:08 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi-reset-card-during-probe.patch/1.1/Mon Mar 29 20:09:26 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch/1.1/Tue Apr 13 15:03:16 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch/1.1/Tue Apr 13 15:03:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-Recover-TX-flow-failure.patch/1.1/Tue Apr 13 15:03:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-Recover-TX-flow-stall-due-to-stuck-queue.patch/1.2/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-Tune-radio-to-prevent-unexpected-behavior.patch/1.2/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch/1.1/Tue Apr 13 15:03:19 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-add-internal-short-scan-support-for-3945.patch/1.1/Tue Apr 13 15:03:19 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-code-cleanup-for-connectivity-recovery.patch/1.1/Tue Apr 13 15:03:20 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch/1.1/Tue Apr 13 15:03:21 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-move-plcp-check-to-separated-function.patch/1.1/Tue Apr 13 15:03:21 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-multiple-force-reset-mode.patch/1.1/Tue Apr 13 15:03:21 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch/1.1/Tue Apr 13 15:03:21 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/kabitool/1.1/Tue Jan 5 02:21:39 2010//Tkernel-2_6_32_16-154_fc12
-/kernel.spec/1.2116/Result of merge//Tkernel-2_6_32_16-154_fc12
-/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch/1.1/Tue Jul 27 19:13:49 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/l2tp-fix-oops-in-pppol2tp_xmit.patch/1.1/Wed Jun 23 12:22:00 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-acpi-video-dos.patch/1.3/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-ata-quirk.patch/1.2/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-autoload-wmi.patch/1.2/Thu Jan 7 10:40:03 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-b43_-Rewrite-DMA-Tx-status-handling-sanity-checks.patch/1.1/Wed Mar 17 18:17:34 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-block-silently-error-unsupported-empty-barriers-too.patch/1.1/Tue Jan 5 02:21:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-btrfs-fix-acl.patch/1.1/Thu Jan 14 20:54:14 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-build-nonintconfig.patch/1.16/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-cantiga-iommu-gfx.patch/1.1/Tue Feb 2 20:59:23 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-compile-fixes.patch/1.197/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-crash-driver.patch/1.16/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-debug-always-inline-kzalloc.patch/1.2/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-debug-nmi-timeout.patch/1.5/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-debug-sizeof-structs.patch/1.8/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-debug-taint-vm.patch/1.25/Thu Jan 7 10:40:03 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-debug-vm-would-have-oomkilled.patch/1.5/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-defaults-acpi-video.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-defaults-alsa-hda-beep-off.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-defaults-aspm.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-defaults-pci_no_msi.patch/1.6/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-defaults-pciehp.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-dell-laptop-rfkill-fix.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-driver-level-usb-autosuspend.diff/1.2/Tue Jan 5 02:22:18 2010//Tkernel-2_6_32_16-154_fc12
-/linux-2.6-e1000-ich9.patch/1.4/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-enable-btusb-autosuspend.patch/1.1/Mon Jan 4 15:44:12 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-execshield.patch/1.111/Tue Jan 19 04:24:21 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-ext4-quota-metadata-reservation.patch/1.1/Mon Jul 26 17:33:34 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-firewire-git-pending.patch/1.35/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-firewire-git-update.patch/1.22/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-fix-usb-serial-autosuspend.diff/1.4/Mon Jan 4 15:44:12 2010//Tkernel-2_6_32_16-154_fc12
-/linux-2.6-g5-therm-shutdown.patch/1.1/Tue Jan 5 02:21:40 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-hotfixes.patch/1.7/Tue Apr 27 22:58:17 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-imac-transparent-bridge.patch/1.1/Tue Jan 5 02:21:40 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-input-fix-toshiba-hotkeys.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-input-hid-quirk-egalax.patch/1.1/Mon Feb 1 00:05:38 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-input-kill-stupid-messages.patch/1.5/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-intel-agp-clear-gtt.patch/1.1/Mon Jan 4 15:44:12 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-ksm-kvm.patch/1.5/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-mac80211-age-scan-results-on-resume.patch/1.3/Mon Jan 4 15:44:12 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-makefile-after_link.patch/1.2/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-nfs4-callback-hidden.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-nfsd4-proots.patch/1.6/Mon Jan 4 15:44:13 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-pci-cacheline-sizing.patch/1.2/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-pciehp-update.patch/1.7/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-phylib-autoload.patch/1.1/Wed Apr 14 20:57:27 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-ps3-storage-alias.patch/1.1/Tue Jan 5 02:21:40 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-qcserial-autosuspend.diff/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/linux-2.6-revert-dvb-net-kabi-change.patch/1.3/Mon Jan 4 15:44:13 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-rfkill-all.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-rt2x00-asus-leds.patch/1.3/Mon Jan 4 15:44:13 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-selinux-mprotect-checks.patch/1.8/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-serial-460800.patch/1.3/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-silence-acpi-blacklist.patch/1.2/Tue Jan 5 02:22:18 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-silence-fbcon-logo.patch/1.2/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-silence-noise.patch/1.22/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-sparc-selinux-mprotect-checks.patch/1.3/Fri Mar 12 23:47:06 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-tracehook.patch/1.11/Fri May 21 22:17:31 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-upstream-reverts.patch/1.13/Tue Apr 27 22:58:25 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-usb-uvc-autosuspend.diff/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/linux-2.6-usb-wwan-update.patch/1.2/Wed Jul 7 17:31:00 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-utrace-ptrace.patch/1.2/Fri May 21 22:17:31 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-utrace.patch/1.121/Fri May 28 05:21:59 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-v4l-dvb-add-kworld-a340-support.patch/1.2/Fri May 28 05:21:59 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-v4l-dvb-add-lgdt3304-support.patch/1.1/Thu May 13 04:32:39 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-v4l-dvb-experimental.patch/1.10/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-v4l-dvb-fixes.patch/1.15/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-v4l-dvb-rebase-gspca-to-latest.patch/1.3/Mon Jan 25 22:10:46 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-v4l-dvb-update.patch/1.16/Tue Jan 5 02:22:19 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-vio-modalias.patch/1.2/Tue Jan 5 02:21:40 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/linux-2.6-x86-64-fbdev-primary.patch/1.1/Wed Jan 13 03:12:45 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6.29-sparc-IOC_TYPECHECK.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6.30-hush-rom-warning.patch/1.2/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/linux-2.6.30-no-pcspkr-modalias.patch/1.1/Tue Jan 5 02:21:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/lirc-2.6.32.patch/1.10/dummy timestamp from new-entry/-kb/Tkernel-2_6_32_16-154_fc12
-/mac80211-do-not-wipe-out-old-supported-rates.patch/1.1/Sun Jun 13 12:30:23 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/mac80211-explicitly-disable-enable-QoS.patch/1.1/Sun Jun 13 12:30:23 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch/1.1/Sun Jun 13 12:30:23 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/merge.pl/1.9/Tue Jan 5 02:21:40 2010/-ko/Tkernel-2_6_32_16-154_fc12
-/mirrors/1.6/dummy timestamp from new-entry//Tkernel-2_6_32_16-154_fc12
-/noautobuild/1.2/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/pci-acpi-disable-aspm-if-no-osc.patch/1.1/Thu Jul 22 02:33:50 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/pci-aspm-dont-enable-too-early.patch/1.1/Thu Jul 22 02:33:50 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/perf/1.6/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/sched-fix-over-scheduling-bug.patch/1.1/Wed Jul 7 03:41:23 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/sky2-optima-add-missing-write-bits.patch/1.1/Tue Jun 29 00:05:52 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/sky2-optima-add-register-definitions.patch/1.1/Tue Jun 29 00:05:52 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/sky2-optima-fix-tcp-offload.patch/1.1/Tue Jun 29 00:05:52 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/sky2-optima-print-chip-name.patch/1.1/Tue Jun 29 00:05:52 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/sky2-optima-support.patch/1.1/Tue Jun 29 00:05:52 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/sources/1.1119/dummy timestamp from new-entry//Tkernel-2_6_32_16-154_fc12
-/ssb_check_for_sprom.patch/1.4/Wed Mar 31 21:16:53 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/thinkpad-acpi-add-x100e.patch/1.1/Wed Apr 21 14:13:22 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/thinkpad-acpi-fix-backlight.patch/1.1/Mon May 17 20:34:08 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/upstream/1.1033/dummy timestamp from new-entry//Tkernel-2_6_32_16-154_fc12
-/upstream-key.gpg/1.1/dummy timestamp from new-entry//Tkernel-2_6_32_16-154_fc12
-/usb-obey-the-sysfs-power-wakeup-setting.patch/1.1/Mon Jul 26 18:33:57 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/via-hwmon-temp-sensor.patch/1.4/Tue Jan 5 02:22:20 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/viafb-neuter-device-table.patch/1.1/Mon Feb 22 11:40:40 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/wmi-check-find_guid-return-value-to-prevent-oops.patch/1.1/Wed Jan 6 02:34:06 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/wmi-survive-bios-with-duplicate-guids.patch/1.1/Wed Jan 6 02:34:06 2010/-kb/Tkernel-2_6_32_16-154_fc12
-/xfs-prevent-swapext-from-operating-on-write-only-files.patch/1.1/Tue Jul 27 19:55:59 2010/-kb/Tkernel-2_6_32_16-154_fc12
diff --git a/freed-ora/current/F-12/CVS/Repository b/freed-ora/current/F-12/CVS/Repository
deleted file mode 100644
index da8de6807..000000000
--- a/freed-ora/current/F-12/CVS/Repository
+++ /dev/null
@@ -1 +0,0 @@
-rpms/kernel/F-12
diff --git a/freed-ora/current/F-12/CVS/Root b/freed-ora/current/F-12/CVS/Root
deleted file mode 100644
index d426f1a67..000000000
--- a/freed-ora/current/F-12/CVS/Root
+++ /dev/null
@@ -1 +0,0 @@
-:pserver:anonymous@cvs.fedoraproject.org.:/cvs/pkgs
diff --git a/freed-ora/current/F-12/CVS/Tag b/freed-ora/current/F-12/CVS/Tag
deleted file mode 100644
index 9b507f95b..000000000
--- a/freed-ora/current/F-12/CVS/Tag
+++ /dev/null
@@ -1 +0,0 @@
-Nkernel-2_6_32_16-154_fc12
diff --git a/freed-ora/current/F-12/Makefile b/freed-ora/current/F-12/Makefile
index 7fa44b0b6..46e734236 100644
--- a/freed-ora/current/F-12/Makefile
+++ b/freed-ora/current/F-12/Makefile
@@ -1,6 +1,4 @@
# Makefile for source rpm: kernel
-# $Id: Makefile,v 1.116 2010/01/05 11:08:24 cebbert Exp $
-NAME := kernel
SPECFILE := kernel.spec
# use noarch for make prep instead of the current CPU
@@ -11,25 +9,12 @@ PREPARCH = noarch
# we only check the .sign signatures
UPSTREAM_CHECKS = sign
-# local targets we need to carry around in addition to the default sources
-TARGETS = download
+.PHONY: help
+help:
+%:
+ @echo "Try fedpkg $@ or something like that"
+ @exit 1
-define find-makefile-common
-for d in common ../common ../../common ; do if [ -f $$d/Makefile.common ] ; then if [ -f $$d/CVS/Root -a -w $$d/Makefile.common ] ; then cd $$d ; cvs -Q update >/dev/null ; fi ; echo "$$d/Makefile.common" ; break ; fi ; done
-endef
-
-MAKEFILE_COMMON := $(shell $(find-makefile-common))
-
-ifeq ($(MAKEFILE_COMMON),)
-# attept a checkout
-define checkout-makefile-common
-test -f CVS/Root && { cvs -Q -d $$(cat CVS/Root) checkout common && echo "common/Makefile.common" ; } || { echo "ERROR: I can't figure out how to checkout the 'common' module." ; exit -1 ; } >&2
-endef
-
-MAKEFILE_COMMON := $(shell $(checkout-makefile-common))
-endif
-
-include $(MAKEFILE_COMMON)
include Makefile.config
ifndef KVERSION
@@ -38,6 +23,9 @@ KVERSION := $(shell awk '$$1 == "%define" && $$2 == "base_sublevel" { \
}' $(SPECFILE))
endif
+prep:
+ fedpkg -v prep --arch=$(PREPARCH)
+
extremedebug:
@perl -pi -e 's/# CONFIG_DEBUG_PAGEALLOC is not set/CONFIG_DEBUG_PAGEALLOC=y/' config-nodebug
@@ -154,9 +142,6 @@ reconfig:
@VERSION=$(KVERSION) make -f Makefile.config configs
@scripts/reconfig.sh
-force-tag: $(SPECFILE) $(COMMON_DIR)/branches
- @$(MAKE) tag TAG_OPTS="-F $(TAG_OPTS)"
-
unused-kernel-patches:
@for f in *.patch; do if [ -e $$f ]; then (egrep -q "^Patch[[:digit:]]+:[[:space:]]+$$f" $(SPECFILE) || echo "Unused: $$f") && egrep -q "^ApplyPatch[[:space:]]+$$f|^ApplyOptionalPatch[[:space:]]+$$f" $(SPECFILE) || echo "Unapplied: $$f"; fi; done
diff --git a/freed-ora/current/F-12/Makefile.config b/freed-ora/current/F-12/Makefile.config
new file mode 100644
index 000000000..53812fa9b
--- /dev/null
+++ b/freed-ora/current/F-12/Makefile.config
@@ -0,0 +1,105 @@
+# Make rules for configuration files.
+#
+# $Id$
+
+CFG = kernel-$(VERSION)
+
+CONFIGFILES = \
+ $(CFG)-i686.config $(CFG)-i686-debug.config \
+ $(CFG)-i686-PAE.config $(CFG)-i686-PAEdebug.config \
+ $(CFG)-x86_64.config $(CFG)-x86_64-debug.config \
+ $(CFG)-s390x.config $(CFG)-arm.config \
+ $(CFG)-ppc.config $(CFG)-ppc-smp.config \
+ $(CFG)-sparc64.config \
+ $(CFG)-ppc64.config $(CFG)-ppc64-debug.config \
+ $(CFG)-ia64.config
+
+PLATFORMS = x86 x86_64 powerpc powerpc32 powerpc64 s390x ia64 sparc64
+TEMPFILES = $(addprefix temp-, $(addsuffix -generic, $(PLATFORMS)))
+
+configs: $(CONFIGFILES)
+ @rm -f kernel-*-config
+ @rm -f $(TEMPFILES)
+ @rm -f temp-generic temp-*-generic temp-*-generic-tmp
+
+# Augment the clean target to clean up our own cruft
+clean ::
+ @rm -fv $(CONFIGFILES) $(TEMPFILES) temp-generic kernel-$(VERSION)*config
+
+temp-generic: config-generic
+ cat config-generic config-nodebug > temp-generic
+
+temp-debug-generic: config-generic
+ cat config-generic config-debug > temp-debug-generic
+
+temp-x86-generic: config-x86-generic temp-generic
+ perl merge.pl $^ > $@
+
+temp-x86-debug-generic: config-x86-generic temp-debug-generic
+ perl merge.pl $^ > $@
+
+temp-x86_64-generic: config-x86_64-generic temp-generic
+ perl merge.pl $^ > $@
+
+temp-x86_64-debug-generic: config-x86_64-generic temp-debug-generic
+ perl merge.pl $^ > $@
+
+temp-sparc64-generic: config-sparc64-generic temp-generic
+ perl merge.pl $^ > $@
+
+temp-powerpc-generic: config-powerpc-generic temp-generic
+ perl merge.pl $^ > $@
+
+temp-powerpc-debug-generic: config-powerpc-generic temp-debug-generic
+ perl merge.pl $^ > $@
+
+temp-powerpc32-generic: config-powerpc32-generic temp-powerpc-generic
+ perl merge.pl $^ > $@
+
+temp-s390-generic: config-s390x temp-generic
+ perl merge.pl $^ > $@
+
+temp-ia64-generic: config-ia64-generic temp-generic
+ perl merge.pl $^ > $@
+
+kernel-$(VERSION)-i686-PAE.config: config-i686-PAE temp-x86-generic
+ perl merge.pl $^ i386 > $@
+
+kernel-$(VERSION)-i686-PAEdebug.config: config-i686-PAE temp-x86-debug-generic
+ perl merge.pl $^ i386 > $@
+
+kernel-$(VERSION)-i686.config: /dev/null temp-x86-generic
+ perl merge.pl $^ i386 > $@
+
+kernel-$(VERSION)-i686-debug.config: /dev/null temp-x86-debug-generic
+ perl merge.pl $^ i386 > $@
+
+kernel-$(VERSION)-x86_64.config: /dev/null temp-x86_64-generic
+ perl merge.pl $^ x86_64 > $@
+
+kernel-$(VERSION)-x86_64-debug.config: /dev/null temp-x86_64-debug-generic
+ perl merge.pl $^ x86_64 > $@
+
+kernel-$(VERSION)-sparc64.config: /dev/null temp-sparc64-generic
+ perl merge.pl $^ sparc64 > $@
+
+kernel-$(VERSION)-ppc64.config: config-powerpc64 temp-powerpc-generic
+ perl merge.pl $^ powerpc > $@
+
+kernel-$(VERSION)-ppc64-debug.config: config-powerpc64 temp-powerpc-debug-generic
+ perl merge.pl $^ powerpc > $@
+
+kernel-$(VERSION)-s390x.config: config-s390x temp-s390-generic
+ perl merge.pl $^ s390 > $@
+
+kernel-$(VERSION)-arm.config: config-arm temp-generic
+ perl merge.pl $^ arm > $@
+
+kernel-$(VERSION)-ppc.config: /dev/null temp-powerpc32-generic
+ perl merge.pl $^ powerpc > $@
+
+kernel-$(VERSION)-ppc-smp.config: config-powerpc32-smp temp-powerpc32-generic
+ perl merge.pl $^ powerpc > $@
+
+kernel-$(VERSION)-ia64.config: /dev/null temp-ia64-generic
+ perl merge.pl $^ ia64 > $@
diff --git a/freed-ora/current/F-12/README.txt b/freed-ora/current/F-12/README.txt
new file mode 100644
index 000000000..482f8ea5b
--- /dev/null
+++ b/freed-ora/current/F-12/README.txt
@@ -0,0 +1,67 @@
+
+ Kernel package tips & tricks.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The kernel is one of the more complicated packages in the distro, and
+for the newcomer, some of the voodoo in the spec file can be somewhat scary.
+This file attempts to document some of the magic.
+
+
+Speeding up make prep
+---------------------
+The kernel is nearly 500MB of source code, and as such, 'make prep'
+takes a while. The spec file employs some trickery so that repeated
+invocations of make prep don't take as long. Ordinarily the %prep
+phase of a package will delete the tree it is about to untar/patch.
+The kernel %prep keeps around an unpatched version of the tree,
+and makes a symlink tree clone of that clean tree and then applies
+the patches listed in the spec to the symlink tree.
+This makes a huge difference if you're doing multiple make preps a day.
+As an added bonus, doing a diff between the clean tree and the symlink
+tree is slightly faster than it would be doing two proper copies of the tree.
+
+
+build logs.
+-----------
+There's a convenience helper script in scripts/grab-logs.sh
+that will grab the build logs from koji for the kernel version reported
+by 'make verrel'.
+
+
+config hierarchy.
+-----------------
+Instead of having to maintain a config file for every arch variant we build on,
+the kernel spec uses a nested system of configs. At the top level is
+config-generic. Add options here that should be present in every possible
+config on all architectures.
+Beneath this are per-arch overrides. For example, config-x86-generic adds
+additional x86 specific options, and also _override_ any options that were
+set in config-generic.
+There exist two additional overrides, config-debug and config-nodebug,
+which override -generic and the per-arch overrides. These are documented
+further below.
+
+debug options.
+--------------
+This is a little complicated, as the purpose & meaning of this changes
+depending on where we are in the release cycle.
+If we are building for a current stable release, 'make release' has
+typically been run already, which sets up the following:
+- Two builds occur, a 'kernel' and a 'kernel-debug' flavor.
+- kernel-debug will get various heavyweight debugging options like
+ lockdep etc turned on.
+
+If we are building for rawhide, 'make debug' has been run, which changes
+the status quo to:
+- We only build one kernel 'kernel'
+- The debug options from 'config-debug' are always turned on.
+This is done to increase coverage testing, as not many people actually
+run kernel-debug.
+
+To add new debug options, add an option to _both_ config-debug and config-nodebug,
+and also new stanzas to the Makefile 'debug' and 'release' targets.
+
+Sometimes debug options get added to config-generic, or per-arch overrides
+instead of config-[no]debug. In this instance, the options should have no
+discernible performance impact; otherwise they belong in the debug files.
+
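A note on the config hierarchy described in README.txt above: the layering is done by merge.pl, and the rules in Makefile.config always pass the more specific file first and the generic base second, so (as README.txt says) an option set in the override replaces whatever the base defined. The sketch below is only an illustration of that "override wins" merge, written in Python; it is not the repository's merge.pl, and the name merge_sketch.py, the sorted output, and the lack of an architecture argument are assumptions made for the example.

    # merge_sketch.py -- illustrative only; the real tool is merge.pl in this directory.
    import re
    import sys

    def read_config(path):
        """Map each CONFIG_* option to its full line ('CONFIG_FOO=y' or '# CONFIG_FOO is not set')."""
        opts = {}
        with open(path) as f:
            for line in f:
                m = re.match(r"(?:# )?(CONFIG_\w+)[ =]", line)
                if m:
                    opts[m.group(1)] = line.rstrip("\n")
        return opts

    def merge(override_path, base_path):
        merged = read_config(base_path)
        merged.update(read_config(override_path))  # options from the override replace the base's
        return "\n".join(merged[name] for name in sorted(merged))

    if __name__ == "__main__":
        # usage, mirroring a Makefile.config rule: merge_sketch.py config-x86-generic temp-generic
        print(merge(sys.argv[1], sys.argv[2]))

The real merge.pl additionally takes an architecture argument (i386, x86_64, powerpc, ...), as the Makefile.config rules above show; the sketch only demonstrates the layering, not the exact output format or ordering of the real script.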
diff --git a/freed-ora/current/F-12/acpi-ec-add-delay-before-write.patch b/freed-ora/current/F-12/acpi-ec-add-delay-before-write.patch
new file mode 100644
index 000000000..af49cccbd
--- /dev/null
+++ b/freed-ora/current/F-12/acpi-ec-add-delay-before-write.patch
@@ -0,0 +1,52 @@
+https://bugzilla.kernel.org/show_bug.cgi?id=14733#c41
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 27e0b92..09fbb69 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -226,6 +226,7 @@ static int ec_poll(struct acpi_ec *ec)
+ if (ec_transaction_done(ec))
+ return 0;
+ } else {
++ msleep(1);
+ if (wait_event_timeout(ec->wait,
+ ec_transaction_done(ec),
+ msecs_to_jiffies(1)))
+@@ -233,8 +234,8 @@ static int ec_poll(struct acpi_ec *ec)
+ }
+ advance_transaction(ec, acpi_ec_read_status(ec));
+ } while (time_before(jiffies, delay));
+- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
+- break;
++// if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
++// break;
+ pr_debug(PREFIX "controller reset, restart transaction\n");
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ start_transaction(ec);
+@@ -271,15 +272,25 @@ static int ec_check_ibf0(struct acpi_ec *ec)
+ return (status & ACPI_EC_FLAG_IBF) == 0;
+ }
+
++/* try to clean input buffer with burst_disable transaction */
++static int acpi_ec_clean_buffer(struct acpi_ec *ec)
++{
++ struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
++ .wdata = NULL, .rdata = NULL,
++ .wlen = 0, .rlen = 0};
++ return acpi_ec_transaction_unlocked(ec, &t);
++}
++
+ static int ec_wait_ibf0(struct acpi_ec *ec)
+ {
++
+ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+ /* interrupt wait manually if GPE mode is not active */
+ while (time_before(jiffies, delay))
+ if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
+ msecs_to_jiffies(1)))
+ return 0;
+- return -ETIME;
++ return acpi_ec_clean_buffer(ec);
+ }
+
+ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
diff --git a/freed-ora/current/F-12/add-appleir-usb-driver.patch b/freed-ora/current/F-12/add-appleir-usb-driver.patch
new file mode 100644
index 000000000..ac68e7115
--- /dev/null
+++ b/freed-ora/current/F-12/add-appleir-usb-driver.patch
@@ -0,0 +1,635 @@
+ Documentation/input/appleir.txt | 46 ++++
+ drivers/hid/hid-apple.c | 4 -
+ drivers/hid/hid-core.c | 5 +-
+ drivers/hid/hid-ids.h | 1 +
+ drivers/input/misc/Kconfig | 13 +
+ drivers/input/misc/Makefile | 1 +
+ drivers/input/misc/appleir.c | 477 +++++++++++++++++++++++++++++++++++++++
+ 7 files changed, 541 insertions(+), 6 deletions(-)
+ create mode 100644 Documentation/input/appleir.txt
+ create mode 100644 drivers/input/misc/appleir.c
+
+diff --git a/Documentation/input/appleir.txt b/Documentation/input/appleir.txt
+new file mode 100644
+index 0000000..0aaf5fe
+--- /dev/null
++++ b/Documentation/input/appleir.txt
+@@ -0,0 +1,46 @@
++Apple IR receiver Driver (appleir)
++----------------------------------
++ Copyright (C) 2009 Bastien Nocera <hadess@hadess.net>
++
++The appleir driver is a kernel input driver to handle Apple's IR
++receivers (and associated remotes) in the kernel.
++
++The driver is an input driver which only handles "official" remotes
++as built and sold by Apple.
++
++Authors
++-------
++
++James McKenzie (original driver)
++Alex Karpenko (05ac:8242 support)
++Greg Kroah-Hartman (cleanups and original submission)
++Bastien Nocera (further cleanups and suspend support)
++
++Supported hardware
++------------------
++
++- All Apple laptops and desktops from 2005 onwards, except:
++ - the unibody Macbook (2009)
++ - Mac Pro (all versions)
++- Apple TV (all revisions)
++
++The remote will only support the 6 buttons of the original remotes
++as sold by Apple. See the next section if you want to use other remotes
++or want to use lirc with the device instead of the kernel driver.
++
++Using lirc (native) instead of the kernel driver
++------------------------------------------------
++
++First, you will need to disable the kernel driver for the receiver.
++
++This can be achieved by passing quirks to the usbhid driver.
++The quirk line would be:
++usbhid.quirks=0x05ac:0x8242:0x08
++
++With 0x05ac being the vendor ID (Apple, you shouldn't need to change this)
++With 0x8242 being the product ID (check the output of lsusb for your hardware)
++And 0x08 being "HID_CONNECT_HIDDEV"
++
++This should force the creation of a hiddev device for the receiver, and
++make it usable under lirc.
++
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 4b96e7a..d1fdcd0 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -353,10 +353,6 @@ static void apple_remove(struct hid_device *hdev)
+ }
+
+ static const struct hid_device_id apple_devices[] = {
+- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL),
+- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT },
+- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4),
+- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE),
+ .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL },
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 7d05c4b..3efb0fa 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1252,8 +1252,6 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
+ static const struct hid_device_id hid_blacklist[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+@@ -1539,6 +1537,9 @@ static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index adbef5d..c399110 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -90,6 +90,7 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
+ #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
+ #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
+
+diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
+index a9bb254..51b6684 100644
+--- a/drivers/input/misc/Kconfig
++++ b/drivers/input/misc/Kconfig
+@@ -148,6 +148,19 @@ config INPUT_KEYSPAN_REMOTE
+ To compile this driver as a module, choose M here: the module will
+ be called keyspan_remote.
+
++config INPUT_APPLEIR
++ tristate "Apple infrared receiver (built in)"
++ depends on USB_ARCH_HAS_HCD
++ select USB
++ help
++ Say Y here if you want to use a Apple infrared remote control. All
++ the Apple computers from 2005 onwards include such a port, except
++ the unibody Macbook (2009), and Mac Pros. This receiver is also
++ used in the Apple TV set-top box.
++
++ To compile this driver as a module, choose M here: the module will
++ be called appleir.
++
+ config INPUT_POWERMATE
+ tristate "Griffin PowerMate and Contour Jog support"
+ depends on USB_ARCH_HAS_HCD
+diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
+index a8b8485..041e6f5 100644
+--- a/drivers/input/misc/Makefile
++++ b/drivers/input/misc/Makefile
+@@ -5,6 +5,7 @@
+ # Each configuration option enables a list of files.
+
+ obj-$(CONFIG_INPUT_APANEL) += apanel.o
++obj-$(CONFIG_INPUT_APPLEIR) += appleir.o
+ obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
+ obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
+ obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
+diff --git a/drivers/input/misc/appleir.c b/drivers/input/misc/appleir.c
+new file mode 100644
+index 0000000..6e332ab
+--- /dev/null
++++ b/drivers/input/misc/appleir.c
+@@ -0,0 +1,477 @@
++/*
++ * appleir: USB driver for the apple ir device
++ *
++ * Original driver written by James McKenzie
++ * Ported to recent 2.6 kernel versions by Greg Kroah-Hartman <gregkh@suse.de>
++ *
++ * Copyright (C) 2006 James McKenzie
++ * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
++ * Copyright (C) 2008 Novell Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation, version 2.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/input.h>
++#include <linux/usb/input.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/usb.h>
++#include <linux/usb/input.h>
++#include <asm/unaligned.h>
++#include <asm/byteorder.h>
++
++#define DRIVER_VERSION "v1.2"
++#define DRIVER_AUTHOR "James McKenzie"
++#define DRIVER_DESC "Apple infrared receiver driver"
++#define DRIVER_LICENSE "GPL"
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE(DRIVER_LICENSE);
++
++#define USB_VENDOR_ID_APPLE 0x05ac
++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
++#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
++#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
++
++#define URB_SIZE 32
++
++#define MAX_KEYS 8
++#define MAX_KEYS_MASK (MAX_KEYS - 1)
++
++#define dbginfo(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0)
++
++static int debug;
++module_param(debug, int, 0644);
++MODULE_PARM_DESC(debug, "Enable extra debug messages and information");
++
++struct appleir {
++ struct input_dev *input_dev;
++ u8 *data;
++ dma_addr_t dma_buf;
++ struct usb_device *usbdev;
++ unsigned int flags;
++ struct urb *urb;
++ int timer_initted;
++ struct timer_list key_up_timer;
++ int current_key;
++ char phys[32];
++};
++
++static DEFINE_MUTEX(appleir_mutex);
++
++enum {
++ APPLEIR_OPENED = 0x1,
++ APPLEIR_SUSPENDED = 0x2,
++};
++
++static struct usb_device_id appleir_ids[] = {
++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
++ {}
++};
++MODULE_DEVICE_TABLE(usb, appleir_ids);
++
++/* I have two devices both of which report the following */
++/* 25 87 ee 83 0a + */
++/* 25 87 ee 83 0c - */
++/* 25 87 ee 83 09 << */
++/* 25 87 ee 83 06 >> */
++/* 25 87 ee 83 05 >" */
++/* 25 87 ee 83 03 menu */
++/* 26 00 00 00 00 for key repeat*/
++
++/* Thomas Glanzmann reports the following responses */
++/* 25 87 ee ca 0b + */
++/* 25 87 ee ca 0d - */
++/* 25 87 ee ca 08 << */
++/* 25 87 ee ca 07 >> */
++/* 25 87 ee ca 04 >" */
++/* 25 87 ee ca 02 menu */
++/* 26 00 00 00 00 for key repeat*/
++/* He also observes the following event sometimes */
++/* sent after a key is release, which I interpret */
++/* as a flat battery message */
++/* 25 87 e0 ca 06 flat battery */
++
++/* Alexandre Karpenko reports the following responses for Device ID 0x8242 */
++/* 25 87 ee 47 0b + */
++/* 25 87 ee 47 0d - */
++/* 25 87 ee 47 08 << */
++/* 25 87 ee 47 07 >> */
++/* 25 87 ee 47 04 >" */
++/* 25 87 ee 47 02 menu */
++/* 26 87 ee 47 ** for key repeat (** is the code of the key being held) */
++
++static int keymap[MAX_KEYS] = {
++ KEY_RESERVED,
++ KEY_MENU,
++ KEY_PLAYPAUSE,
++ KEY_FORWARD,
++ KEY_BACK,
++ KEY_VOLUMEUP,
++ KEY_VOLUMEDOWN,
++ KEY_RESERVED,
++};
++
++static void dump_packet(struct appleir *appleir, char *msg, u8 *data, int len)
++{
++ int i;
++
++ printk(KERN_ERR "appleir: %s (%d bytes)", msg, len);
++
++ for (i = 0; i < len; ++i)
++ printk(" %02x", data[i]);
++ printk("\n");
++}
++
++static void key_up(struct appleir *appleir, int key)
++{
++ dbginfo (&appleir->input_dev->dev, "key %d up\n", key);
++ input_report_key(appleir->input_dev, key, 0);
++ input_sync(appleir->input_dev);
++}
++
++static void key_down(struct appleir *appleir, int key)
++{
++ dbginfo (&appleir->input_dev->dev, "key %d down\n", key);
++ input_report_key(appleir->input_dev, key, 1);
++ input_sync(appleir->input_dev);
++}
++
++static void battery_flat(struct appleir *appleir)
++{
++ dev_err(&appleir->input_dev->dev, "possible flat battery?\n");
++}
++
++static void key_up_tick(unsigned long data)
++{
++ struct appleir *appleir = (struct appleir *)data;
++
++ if (appleir->current_key) {
++ key_up(appleir, appleir->current_key);
++ appleir->current_key = 0;
++ }
++}
++
++static void new_data(struct appleir *appleir, u8 *data, int len)
++{
++ static const u8 keydown[] = { 0x25, 0x87, 0xee };
++ static const u8 keyrepeat[] = { 0x26, };
++ static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
++
++ if (debug)
++ dump_packet(appleir, "received", data, len);
++
++ if (len != 5)
++ return;
++
++ if (!memcmp(data, keydown, sizeof(keydown))) {
++ /*If we already have a key down, take it up before marking */
++ /*this one down */
++ if (appleir->current_key)
++ key_up(appleir, appleir->current_key);
++ appleir->current_key = keymap[(data[4] >> 1) & MAX_KEYS_MASK];
++
++ key_down(appleir, appleir->current_key);
++ /*remote doesn't do key up, either pull them up, in the test */
++ /*above, or here set a timer which pulls them up after 1/8 s */
++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8);
++
++ return;
++ }
++
++ if (!memcmp(data, keyrepeat, sizeof(keyrepeat))) {
++ key_down(appleir, appleir->current_key);
++ /*remote doesn't do key up, either pull them up, in the test */
++ /*above, or here set a timer which pulls them up after 1/8 s */
++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8);
++ return;
++ }
++
++ if (!memcmp(data, flatbattery, sizeof(flatbattery))) {
++ battery_flat(appleir);
++ /* Fall through */
++ }
++
++ dump_packet(appleir, "unknown packet", data, len);
++}
++
++static void appleir_urb(struct urb *urb)
++{
++ struct appleir *appleir = urb->context;
++ int status = urb->status;
++ int retval;
++
++ switch (status) {
++ case 0:
++ new_data(appleir, urb->transfer_buffer, urb->actual_length);
++ break;
++ case -ECONNRESET:
++ case -ENOENT:
++ case -ESHUTDOWN:
++ /* this urb is terminated, clean up */
++ dbginfo(&appleir->input_dev->dev, "%s - urb shutting down with status: %d", __func__,
++ urb->status);
++ return;
++ default:
++ dbginfo(&appleir->input_dev->dev, "%s - nonzero urb status received: %d", __func__,
++ urb->status);
++ }
++
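++ /* Resubmit so reports keep coming; the fatal URB statuses were handled above */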
++ retval = usb_submit_urb(urb, GFP_ATOMIC);
++ if (retval)
++ err("%s - usb_submit_urb failed with result %d", __func__,
++ retval);
++}
++
++static int appleir_open(struct input_dev *dev)
++{
++ struct appleir *appleir = input_get_drvdata(dev);
++ struct usb_interface *intf = usb_ifnum_to_if(appleir->usbdev, 0);
++ int r;
++
++ r = usb_autopm_get_interface(intf);
++ if (r) {
++ dev_err(&intf->dev,
++ "%s(): usb_autopm_get_interface() = %d\n", __func__, r);
++ return r;
++ }
++
++ mutex_lock(&appleir_mutex);
++
++ if (usb_submit_urb(appleir->urb, GFP_KERNEL)) {
++ r = -EIO;
++ goto fail;
++ }
++
++ appleir->flags |= APPLEIR_OPENED;
++
++ mutex_unlock(&appleir_mutex);
++
++ usb_autopm_put_interface(intf);
++
++ return 0;
++fail:
++ mutex_unlock(&appleir_mutex);
++ usb_autopm_put_interface(intf);
++ return r;
++}
++
++static void appleir_close(struct input_dev *dev)
++{
++ struct appleir *appleir = input_get_drvdata(dev);
++
++ mutex_lock(&appleir_mutex);
++
++ if (!(appleir->flags & APPLEIR_SUSPENDED)) {
++ usb_kill_urb(appleir->urb);
++ del_timer_sync(&appleir->key_up_timer);
++ }
++
++ appleir->flags &= ~APPLEIR_OPENED;
++
++ mutex_unlock(&appleir_mutex);
++}
++
++static int appleir_probe(struct usb_interface *intf,
++ const struct usb_device_id *id)
++{
++ struct usb_device *dev = interface_to_usbdev(intf);
++ struct usb_endpoint_descriptor *endpoint;
++ struct appleir *appleir = NULL;
++ struct input_dev *input_dev;
++ int retval = -ENOMEM;
++ int i;
++
++ appleir = kzalloc(sizeof(struct appleir), GFP_KERNEL);
++ if (!appleir)
++ goto fail;
++
++ appleir->data = usb_buffer_alloc(dev, URB_SIZE, GFP_KERNEL,
++ &appleir->dma_buf);
++ if (!appleir->data)
++ goto fail;
++
++ appleir->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!appleir->urb)
++ goto fail;
++
++ appleir->usbdev = dev;
++
++ input_dev = input_allocate_device();
++ if (!input_dev)
++ goto fail;
++
++ appleir->input_dev = input_dev;
++
++ usb_make_path(dev, appleir->phys, sizeof(appleir->phys));
++ strlcat(appleir->phys, "/input0", sizeof(appleir->phys));
++
++ input_dev->name = "Apple infrared remote control driver";
++ input_dev->phys = appleir->phys;
++ usb_to_input_id(dev, &input_dev->id);
++ input_dev->dev.parent = &intf->dev;
++ input_set_drvdata(input_dev, appleir);
++
++ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
++ input_dev->ledbit[0] = 0;
++
++ for (i = 0; i < MAX_KEYS; i++)
++ set_bit(keymap[i], input_dev->keybit);
++
++ clear_bit(0, input_dev->keybit);
++
++ input_dev->open = appleir_open;
++ input_dev->close = appleir_close;
++
++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
++
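++ /* 8-byte interrupt IN reports land in the coherent buffer allocated above */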
++ usb_fill_int_urb(appleir->urb, dev,
++ usb_rcvintpipe(dev, endpoint->bEndpointAddress),
++ appleir->data, 8,
++ appleir_urb, appleir, endpoint->bInterval);
++
++ appleir->urb->transfer_dma = appleir->dma_buf;
++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++
++ usb_set_intfdata(intf, appleir);
++
++ init_timer(&appleir->key_up_timer);
++
++ appleir->key_up_timer.function = key_up_tick;
++ appleir->key_up_timer.data = (unsigned long)appleir;
++
++ appleir->timer_initted++;
++
++ retval = input_register_device(appleir->input_dev);
++ if (retval)
++ goto fail;
++
++ return 0;
++
++fail:
++ printk(KERN_WARNING "Failed to load appleir\n");
++ if (appleir) {
++ if (appleir->data)
++ usb_buffer_free(dev, URB_SIZE, appleir->data,
++ appleir->dma_buf);
++
++ if (appleir->timer_initted)
++ del_timer_sync(&appleir->key_up_timer);
++
++ if (appleir->input_dev)
++ input_free_device(appleir->input_dev);
++
++ kfree(appleir);
++ }
++
++ return retval;
++}
++
++static void appleir_disconnect(struct usb_interface *intf)
++{
++ struct appleir *appleir = usb_get_intfdata(intf);
++
++ usb_set_intfdata(intf, NULL);
++ if (appleir) {
++ input_unregister_device(appleir->input_dev);
++ if (appleir->timer_initted)
++ del_timer_sync(&appleir->key_up_timer);
++ usb_kill_urb(appleir->urb);
++ usb_free_urb(appleir->urb);
++ usb_buffer_free(interface_to_usbdev(intf), URB_SIZE,
++ appleir->data, appleir->dma_buf);
++ kfree(appleir);
++ }
++}
++
++static int appleir_suspend(struct usb_interface *interface,
++ pm_message_t message)
++{
++ struct appleir *appleir;
++
++ appleir = usb_get_intfdata(interface);
++
++ mutex_lock(&appleir_mutex);
++
++ if (appleir->flags & APPLEIR_OPENED) {
++ usb_kill_urb(appleir->urb);
++ del_timer_sync(&appleir->key_up_timer);
++ }
++
++ appleir->flags |= APPLEIR_SUSPENDED;
++
++ mutex_unlock(&appleir_mutex);
++
++ return 0;
++}
++
++static int appleir_resume(struct usb_interface *interface)
++{
++ struct appleir *appleir;
++
++ appleir = usb_get_intfdata(interface);
++
++ mutex_lock(&appleir_mutex);
++
++ if (appleir->flags & APPLEIR_OPENED) {
++ struct usb_endpoint_descriptor *endpoint;
++
++ endpoint = &interface->cur_altsetting->endpoint[0].desc;
++ usb_fill_int_urb(appleir->urb, appleir->usbdev,
++ usb_rcvintpipe(appleir->usbdev, endpoint->bEndpointAddress),
++ appleir->data, 8,
++ appleir_urb, appleir, endpoint->bInterval);
++ appleir->urb->transfer_dma = appleir->dma_buf;
++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++
++ init_timer(&appleir->key_up_timer);
++
++ appleir->key_up_timer.function = key_up_tick;
++ appleir->key_up_timer.data = (unsigned long)appleir;
++ }
++
++ appleir->flags &= ~APPLEIR_SUSPENDED;
++
++ mutex_unlock(&appleir_mutex);
++
++ return 0;
++}
++
++static struct usb_driver appleir_driver = {
++ .name = "appleir",
++ .probe = appleir_probe,
++ .disconnect = appleir_disconnect,
++ .suspend = appleir_suspend,
++ .resume = appleir_resume,
++ .reset_resume = appleir_resume,
++ .id_table = appleir_ids,
++ .supports_autosuspend = 1,
++};
++
++static int __init appleir_init(void)
++{
++ int retval;
++
++ retval = usb_register(&appleir_driver);
++ if (retval)
++ goto out;
++ printk(KERN_INFO DRIVER_VERSION ":" DRIVER_DESC "\n");
++out:
++ return retval;
++}
++
++static void __exit appleir_exit(void)
++{
++ usb_deregister(&appleir_driver);
++}
++
++module_init(appleir_init);
++module_exit(appleir_exit);
+--
+1.6.5.2
+
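For reference, a minimal userspace sketch (not part of the patch) for watching the key events this driver reports through evdev; the device node /dev/input/event5 is a placeholder you would look up in /proc/bus/input/devices:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	/* placeholder node; find the real one via /proc/bus/input/devices */
	int fd = open("/dev/input/event5", O_RDONLY);
	struct input_event ev;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		if (ev.type == EV_KEY)
			printf("key %d %s\n", ev.code, ev.value ? "down" : "up");
	close(fd);
	return 0;
}
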
diff --git a/freed-ora/current/F-12/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch b/freed-ora/current/F-12/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch
new file mode 100644
index 000000000..079260349
--- /dev/null
+++ b/freed-ora/current/F-12/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch
@@ -0,0 +1,42 @@
+From: Shi Weihua <shiwh@cn.fujitsu.com>
+Date: Tue, 18 May 2010 00:51:54 +0000 (+0000)
+Subject: Btrfs: prohibit a operation of changing acl's mask when noacl mount option used
+X-Git-Tag: v2.6.35-rc3~3^2~3
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=731e3d1b
+
+Btrfs: prohibit a operation of changing acl's mask when noacl mount option used
+
+When using the POSIX File System Test Suite (pjd-fstest) to test btrfs,
+some setfacl test cases failed when the noacl mount option was used.
+I simplified the commands used in pjd-fstest; the following steps
+reproduce it.
+------------------------
+# cd btrfs-part/
+# mkdir aaa
+# setfacl -m m::rw aaa <- succeeds, but pjd-fstest does not expect this.
+------------------------
+I checked ext3; there, a warning message occurs, like this:
+ setfacl: aaa/: Operation not supported
+That is exactly what pjd-fstest expects.
+
+So I compared the acl.c of btrfs and ext3, and created a patch based on
+the difference. It works.
+
+Signed-off-by: Shi Weihua <shiwh@cn.fujitsu.com>
+Signed-off-by: Chris Mason <chris.mason@oracle.com>
+---
+
+diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
+index 6b4d0cc..a372985 100644
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -163,6 +163,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
+ if (!is_owner_or_cap(inode))
+ return -EPERM;
+
++ if (!IS_POSIXACL(inode))
++ return -EOPNOTSUPP;
++
+ if (value) {
+ acl = posix_acl_from_xattr(value, size);
+ if (acl == NULL) {
diff --git a/freed-ora/current/F-12/config-arm b/freed-ora/current/F-12/config-arm
new file mode 100644
index 000000000..0c90cd5a9
--- /dev/null
+++ b/freed-ora/current/F-12/config-arm
@@ -0,0 +1,102 @@
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+# CONFIG_SMP is not set
+
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+
+CONFIG_ARCH_VERSATILE=y
+CONFIG_ARCH_VERSATILE_PB=y
+CONFIG_MACH_VERSATILE_AB=y
+
+CONFIG_HIGHMEM=y
+# CONFIG_HIGHPTE is not set
+
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+
+# CONFIG_XIP_KERNEL is not set
+
+CONFIG_ATAGS_PROC=y
+
+# CONFIG_FPE_NWFPE is not set
+CONFIG_FPE_FASTFPE=y
+CONFIG_VFP=y
+
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+# CONFIG_PM_TRACE is not set
+CONFIG_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_APM_EMULATION=y
+
+CONFIG_ARM_THUMB=y
+
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+CONFIG_CMDLINE="console=ttyAM0,115200 root=/dev/sda1 rootdelay=20"
+
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+# CONFIG_CPU_IDLE is not set
+
+CONFIG_LEDS=y
+CONFIG_LEDS_CPU=y
+
+CONFIG_MTD_AFS_PARTS=y
+CONFIG_MTD_ARM_INTEGRATOR=y
+CONFIG_MTD_IMPA7=y
+
+CONFIG_AX88796=m
+CONFIG_AX88796_93CX6=y
+CONFIG_SMC91X=m
+CONFIG_DM9000=m
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
+CONFIG_SMC911X=m
+CONFIG_SMSC911X=m
+
+CONFIG_SERIO_AMBAKMI=m
+
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+
+CONFIG_I2C_VERSATILE=y
+
+CONFIG_THERMAL=y
+
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+
+CONFIG_FB_ARMCLCD=m
+
+CONFIG_SND_ARM=y
+CONFIG_SND_ARMAACI=m
+
+CONFIG_USB_MUSB_HDRC=m
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_TUSB6010=y
+# CONFIG_USB_MUSB_DEBUG is not set
+
+CONFIG_MMC_ARMMMCI=m
+
+CONFIG_RTC_DRV_PL030=m
+CONFIG_RTC_DRV_PL031=m
+
+# CONFIG_SGI_IOC4 is not set
+
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_LL is not set
+
+CONFIG_ARM_UNWIND=y
+
+CONFIG_RCU_FANOUT=32
diff --git a/freed-ora/current/F-12/config-debug b/freed-ora/current/F-12/config-debug
new file mode 100644
index 000000000..0ded62b16
--- /dev/null
+++ b/freed-ora/current/F-12/config-debug
@@ -0,0 +1,64 @@
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_PCM_XRUN_DEBUG=y
+
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_SPINLOCK=y
+
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAILSLAB=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAIL_MAKE_REQUEST=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_FAIL_IO_TIMEOUT=y
+
+CONFIG_SLUB_DEBUG_ON=y
+
+CONFIG_LOCK_STAT=y
+
+CONFIG_DEBUG_STACK_USAGE=y
+
+CONFIG_ACPI_DEBUG=y
+# CONFIG_ACPI_DEBUG_FUNC_TRACE is not set
+
+CONFIG_DEBUG_SG=y
+
+# CONFIG_DEBUG_PAGEALLOC is not set
+
+CONFIG_DEBUG_WRITECOUNT=y
+CONFIG_DEBUG_OBJECTS=y
+# CONFIG_DEBUG_OBJECTS_SELFTEST is not set
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+
+CONFIG_X86_PTDUMP=y
+
+CONFIG_CAN_DEBUG_DEVICES=y
+
+CONFIG_MODULE_FORCE_UNLOAD=y
+
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+
+CONFIG_DEBUG_NOTIFIERS=y
+
+CONFIG_DMA_API_DEBUG=y
+
+CONFIG_MMIOTRACE=y
+
+CONFIG_DEBUG_CREDENTIALS=y
+
+CONFIG_EXT4_DEBUG=y
+
+CONFIG_DEBUG_PERF_USE_VMALLOC=y
+
+# off in both production debug and nodebug builds,
+# on in rawhide nodebug builds
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+
+CONFIG_JBD2_DEBUG=y
diff --git a/freed-ora/current/F-12/config-i686-PAE b/freed-ora/current/F-12/config-i686-PAE
new file mode 100644
index 000000000..7f49f6160
--- /dev/null
+++ b/freed-ora/current/F-12/config-i686-PAE
@@ -0,0 +1,37 @@
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+
+CONFIG_XEN_DEV_EVTCHN=m
+CONFIG_XEN_SYS_HYPERVISOR=y
+
+CONFIG_LANGWELL_IPC=y
+# CONFIG_IMG_DOES_NOT_SUPPORT_MENLOW is not set
+CONFIG_PVR_RELEASE="release"
+CONFIG_PVR_SERVICES4=y
+CONFIG_PVR_XOPEN_SOURCE=600
+CONFIG_PVR2D_VALIDATE_INPUT_PARAMS=y
+CONFIG_PVR_DISPLAY_CONTROLLER="mrstlfb"
+CONFIG_PVR_SGX_CORE_REV=121
+CONFIG_PVR_SUPPORT_SVRINIT=y
+CONFIG_PVR_SUPPORT_SGX=y
+CONFIG_PVR_SUPPORT_PERCONTEXT_PB=y
+CONFIG_PVR_SUPPORT_LINUX_X86_WRITECOMBINE=y
+CONFIG_PVR_TRANSFER_QUEUE=y
+CONFIG_PVR_SUPPORT_DRI_DRM=y
+CONFIG_PVR_SYS_USING_INTERRUPTS=y
+CONFIG_PVR_SUPPORT_HW_RECOVERY=y
+CONFIG_PVR_SUPPORT_POWER_MANAGEMENT=y
+CONFIG_PVR_SECURE_HANDLES=y
+CONFIG_PVR_USE_PTHREADS=y
+CONFIG_PVR_SUPPORT_SGX_EVENT_OBJECT=y
+CONFIG_PVR_SUPPORT_SGX_HWPERF=y
+CONFIG_PVR_SUPPORT_SGX_LOW_LATENCY_SCHEDULING=y
+CONFIG_PVR_SUPPORT_LINUX_X86_PAT=y
+CONFIG_PVR_PROC_USE_SEQ_FILE=y
+CONFIG_PVR_SUPPORT_SGX535=y
+# CONFIG_PVR_SUPPORT_CACHEFLUSH_ON_ALLOC is not set
+# CONFIG_PVR_SUPPORT_MEMINFO_IDS is not set
+CONFIG_PVR_SUPPORT_CACHE_LINE_FLUSH=y
+CONFIG_PVR_SUPPORT_CPU_CACHED_BUFFERS=y
+CONFIG_PVR_DEBUG_MESA_OGL_TRACE=y
+
diff --git a/freed-ora/current/F-12/config-ia64-generic b/freed-ora/current/F-12/config-ia64-generic
new file mode 100644
index 000000000..51e1bc91d
--- /dev/null
+++ b/freed-ora/current/F-12/config-ia64-generic
@@ -0,0 +1,206 @@
+#
+# Automatically generated make config: don't edit
+#
+
+#
+# Processor type and features
+#
+CONFIG_IA64=y
+CONFIG_64BIT=y
+# CONFIG_XEN is not set
+CONFIG_MMU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_EFI=y
+# CONFIG_ITANIUM is not set
+CONFIG_MCKINLEY=y
+CONFIG_IA64_GENERIC=y
+# CONFIG_IA64_DIG is not set
+# CONFIG_IA64_HP_ZX1 is not set
+# CONFIG_IA64_SGI_SN2 is not set
+CONFIG_IA64_ESI=y
+CONFIG_IA64_HP_AML_NFW=y
+CONFIG_MSPEC=y
+# CONFIG_IA64_HP_SIM is not set
+# CONFIG_IA64_PAGE_SIZE_4KB is not set
+# CONFIG_IA64_PAGE_SIZE_8KB is not set
+CONFIG_IA64_PAGE_SIZE_16KB=y
+# CONFIG_IA64_PAGE_SIZE_64KB is not set
+CONFIG_IA64_L1_CACHE_SHIFT=7
+CONFIG_NUMA=y
+# CONFIG_VIRTUAL_MEM_MAP is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_IA64_MCA_RECOVERY=m
+CONFIG_IA64_CYCLONE=y
+CONFIG_MMTIMER=y
+CONFIG_IOSAPIC=y
+CONFIG_FORCE_MAX_ZONEORDER=18
+CONFIG_NR_CPUS=1024
+# CONFIG_IA32_SUPPORT is not set
+# CONFIG_COMPAT is not set
+CONFIG_PERFMON=y
+CONFIG_IA64_PALINFO=y
+CONFIG_EFI_VARS=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=16
+CONFIG_EFI_PCDP=y
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_BLK_DEV_SGIIOC4=y
+
+#
+# Character devices
+#
+CONFIG_TCG_INFINEON=m
+
+#
+# Watchdog Cards
+#
+# CONFIG_HW_RANDOM is not set
+# CONFIG_GEN_RTC is not set
+CONFIG_EFI_RTC=y
+CONFIG_RTC_DRV_EFI=y
+
+
+#
+# AGP
+#
+CONFIG_AGP_I460=y
+CONFIG_AGP_HP_ZX1=y
+CONFIG_AGP_SGI_TIOCA=y
+
+#
+# HP Simulator drivers
+#
+# CONFIG_HP_SIMETH is not set
+# CONFIG_HP_SIMSERIAL is not set
+# CONFIG_HP_SIMSCSI is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_IA64_PRINT_HAZARDS is not set
+# CONFIG_DISABLE_VHPT is not set
+# CONFIG_IA64_DEBUG_CMPXCHG is not set
+# CONFIG_IA64_DEBUG_IRQ is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# SGI
+#
+CONFIG_SGI_SNSC=y
+CONFIG_SGI_TIOCX=y
+CONFIG_SGI_MBCS=m
+CONFIG_SGI_IOC3=m
+CONFIG_SGI_IOC4=y
+CONFIG_SGI_XP=m
+CONFIG_SGI_GRU=m
+# CONFIG_SGI_GRU_DEBUG is not set
+CONFIG_SERIAL_SGI_L1_CONSOLE=y
+CONFIG_SERIAL_SGI_IOC3=m
+CONFIG_SERIAL_SGI_IOC4=m
+
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_BUSLOGIC is not set
+
+#
+CONFIG_ACPI=y
+CONFIG_ACPI_AC=y
+# CONFIG_ACPI_ASUS is not set
+CONFIG_ACPI_PROCFS_POWER=y
+CONFIG_ACPI_SYSFS_POWER=y
+# CONFIG_ACPI_BATTERY is not set
+CONFIG_ACPI_BLACKLIST_YEAR=0
+CONFIG_ACPI_BUTTON=y
+# CONFIG_ACPI_DOCK is not set
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_NUMA=y
+CONFIG_ACPI_POWER=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_PROCFS=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_THERMAL=y
+# CONFIG_ACPI_TOSHIBA is not set
+CONFIG_ACPI_VIDEO=m
+# CONFIG_ACPI_PROC_EVENT is not set
+
+CONFIG_PM=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HPET is not set
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+CONFIG_HOTPLUG_PCI_SHPC=m
+CONFIG_HOTPLUG_PCI_SGI=m
+CONFIG_PNPACPI=y
+
+CONFIG_SCHED_SMT=y
+
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
+
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEBUG=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+
+CONFIG_IA64_ACPI_CPUFREQ=m
+
+# CONFIG_PERMIT_BSP_REMOVE is not set
+# CONFIG_FORCE_CPEI_RETARGET is not set
+
+CONFIG_NODES_SHIFT=10
+
+
+CONFIG_HW_RANDOM_INTEL=m
+
+CONFIG_CRASH_DUMP=y
+CONFIG_PROC_VMCORE=y
+
+# drivers/media/video/usbvision/usbvision-i2c.c:64:39: error: macro "outb" passed 4 arguments, but takes just 2
+# CONFIG_VIDEO_USBVISION is not set
+
+# CONFIG_IA64_MC_ERR_INJECT is not set
+
+CONFIG_DMIID=y
+
+CONFIG_SENSORS_I5K_AMB=m
+
+CONFIG_SPARSEMEM_VMEMMAP=y
+
+CONFIG_FRAME_WARN=2048
+
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+
+CONFIG_HP_ILO=m
+
+CONFIG_PARAVIRT_GUEST=y
+CONFIG_PARAVIRT=y
+
+CONFIG_DMAR_DEFAULT_ON=y
+
+CONFIG_RCU_FANOUT=64
+
+CONFIG_ACPI_POWER_METER=m
+CONFIG_I2C_SCMI=m
diff --git a/freed-ora/current/F-12/config-nodebug b/freed-ora/current/F-12/config-nodebug
new file mode 100644
index 000000000..26f0fa05a
--- /dev/null
+++ b/freed-ora/current/F-12/config-nodebug
@@ -0,0 +1,64 @@
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_PCM_XRUN_DEBUG=y
+
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_FAILSLAB is not set
+# CONFIG_FAIL_PAGE_ALLOC is not set
+# CONFIG_FAIL_MAKE_REQUEST is not set
+# CONFIG_FAULT_INJECTION_DEBUG_FS is not set
+# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set
+# CONFIG_FAIL_IO_TIMEOUT is not set
+
+# CONFIG_SLUB_DEBUG_ON is not set
+
+# CONFIG_LOCK_STAT is not set
+
+# CONFIG_DEBUG_STACK_USAGE is not set
+
+# CONFIG_ACPI_DEBUG is not set
+# CONFIG_ACPI_DEBUG_FUNC_TRACE is not set
+
+# CONFIG_DEBUG_SG is not set
+
+# CONFIG_DEBUG_PAGEALLOC is not set
+
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_OBJECTS_SELFTEST is not set
+# CONFIG_DEBUG_OBJECTS_FREE is not set
+# CONFIG_DEBUG_OBJECTS_TIMERS is not set
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+
+# CONFIG_X86_PTDUMP is not set
+
+# CONFIG_CAN_DEBUG_DEVICES is not set
+
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+
+# CONFIG_DEBUG_NOTIFIERS is not set
+
+# CONFIG_DMA_API_DEBUG is not set
+
+# CONFIG_MMIOTRACE is not set
+
+# CONFIG_DEBUG_CREDENTIALS is not set
+
+# off in both production debug and nodebug builds,
+# on in rawhide nodebug builds
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+
+# CONFIG_EXT4_DEBUG is not set
+
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+
+# CONFIG_JBD2_DEBUG is not set
diff --git a/freed-ora/current/F-12/config-powerpc-generic b/freed-ora/current/F-12/config-powerpc-generic
new file mode 100644
index 000000000..598fbb547
--- /dev/null
+++ b/freed-ora/current/F-12/config-powerpc-generic
@@ -0,0 +1,326 @@
+# Most PowerPC kernels we build are SMP
+CONFIG_SMP=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_PPC=y
+CONFIG_WATCHDOG_RTAS=m
+CONFIG_DEBUGGER=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_ALTIVEC=y
+
+CONFIG_TAU=y
+# CONFIG_TAU_INT is not set
+CONFIG_TAU_AVERAGE=y
+
+CONFIG_SECCOMP=y
+
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEBUG=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+
+CONFIG_PM=y
+
+CONFIG_PM_STD_PARTITION=""
+
+CONFIG_SUSPEND=y
+CONFIG_HIBERNATION=y
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_GEN_RTC_X is not set
+CONFIG_RTC_DRV_GENERIC=y
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+
+CONFIG_ADB=y
+CONFIG_ADB_PMU=y
+CONFIG_WINDFARM=y
+CONFIG_WINDFARM_PM112=y
+CONFIG_I2C_POWERMAC=y
+CONFIG_APPLE_AIRPORT=m
+CONFIG_SERIAL_PMACZILOG=m
+# CONFIG_SERIAL_PMACZILOG_TTYS is not set
+CONFIG_AGP_UNINORTH=y
+CONFIG_FB_OF=y
+# CONFIG_FB_CONTROL is not set
+CONFIG_FB_IBM_GXT4500=y
+CONFIG_FB_RADEON=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_NVIDIA=m
+# CONFIG_FB_VGA16 is not set
+CONFIG_FB_ATY128_BACKLIGHT=y
+CONFIG_FB_ATY_BACKLIGHT=y
+CONFIG_FB_RADEON_BACKLIGHT=y
+CONFIG_FB_RIVA_BACKLIGHT=y
+CONFIG_FB_NVIDIA_BACKLIGHT=y
+
+CONFIG_SND_POWERMAC=m
+CONFIG_SND_POWERMAC_AUTO_DRC=y
+CONFIG_SND_AOA=m
+CONFIG_SND_AOA_SOUNDBUS=m
+CONFIG_SND_AOA_FABRIC_LAYOUT=m
+CONFIG_SND_AOA_ONYX=m
+CONFIG_SND_AOA_TAS=m
+CONFIG_SND_AOA_TOONIE=m
+CONFIG_SND_AOA_SOUNDBUS_I2S=m
+
+CONFIG_XMON=y
+# CONFIG_XMON_DEFAULT is not set
+CONFIG_XMON_DISASSEMBLY=y
+
+CONFIG_BOOTX_TEXT=y
+CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_CAPI_EICON=y
+
+CONFIG_NVRAM=y
+
+# CONFIG_PCMCIA_M8XX is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_NI52 is not set
+# CONFIG_NI65 is not set
+# CONFIG_LANCE is not set
+# CONFIG_3C515 is not set
+# CONFIG_ELPLUS is not set
+
+CONFIG_MEMORY_HOTPLUG=y
+
+# Stuff which wants bus_to_virt() or virt_to_bus()
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_VIDEO_STRADIS is not set
+# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+
+
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+# CONFIG_PMAC_BACKLIGHT_LEGACY is not set
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+
+# FIXME: Should depend on IA64/x86
+# CONFIG_SGI_IOC4 is not set
+
+CONFIG_PPC_EFIKA=y
+CONFIG_PPC_MEDIA5200=y
+
+# CONFIG_PPC_LITE5200 is not set
+CONFIG_PPC_BESTCOMM=y
+CONFIG_PMAC_RACKMETER=m
+CONFIG_USB_OHCI_HCD_PPC_SOC=y
+CONFIG_USB_OHCI_HCD_PCI=y
+CONFIG_USB_OHCI_HCD_PPC_OF=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
+
+CONFIG_SERIAL_UARTLITE=m
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+
+CONFIG_SENSORS_AMS=m
+CONFIG_SENSORS_AMS_PMU=y
+CONFIG_SENSORS_AMS_I2C=y
+
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_IDECS is not set
+CONFIG_BLK_DEV_IDECD=m
+# CONFIG_BLK_DEV_IDETAPE is not set
+CONFIG_IDE_TASK_IOCTL=y
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
+# CONFIG_BLK_DEV_IDEPCI is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+CONFIG_BLK_DEV_IDE_PMAC=y
+CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+CONFIG_MTD_PHYSMAP_OF=m
+CONFIG_IDE_PROC_FS=y
+CONFIG_MACINTOSH_DRIVERS=y
+
+CONFIG_PPC_PASEMI_MDIO=m
+CONFIG_SPU_FS_64K_LS=y
+CONFIG_PPC_PASEMI_CPUFREQ=y
+CONFIG_PMAC_APM_EMU=m
+CONFIG_HW_RANDOM_PASEMI=m
+
+CONFIG_EDAC=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_MM_EDAC=m
+CONFIG_EDAC_PASEMI=m
+CONFIG_EDAC_AMD8131=m
+CONFIG_EDAC_AMD8111=m
+
+CONFIG_AXON_RAM=m
+CONFIG_OPROFILE_CELL=y
+
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_PATA_PLATFORM=m
+CONFIG_PATA_OF_PLATFORM=m
+CONFIG_USB_EHCI_HCD_PPC_OF=y
+
+# CONFIG_MPC5121_ADS is not set
+# CONFIG_MPC5121_GENERIC is not set
+CONFIG_MTD_OF_PARTS=m
+# CONFIG_MTD_NAND_FSL_ELBC is not set
+CONFIG_THERMAL=y
+
+# CONFIG_MEMORY_HOTREMOVE is not set
+
+CONFIG_DMADEVICES=y
+# CONFIG_FSL_DMA is not set
+
+CONFIG_SND_PPC=y
+
+CONFIG_PPC_82xx=y
+CONFIG_PPC_83xx=y
+CONFIG_PPC_86xx=y
+CONFIG_EXTRA_TARGETS=""
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+
+# CONFIG_MATH_EMULATION is not set
+# CONFIG_RAPIDIO is not set
+# CONFIG_FS_ENET is not set
+# CONFIG_UCC_GETH is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_SERIAL_CPM is not set
+# CONFIG_SERIAL_QE is not set
+# CONFIG_I2C_CPM is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_W1_MASTER_GPIO is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+# CONFIG_GPIO_MAX732X is not set
+
+
+CONFIG_SERIO_XILINX_XPS_PS2=m
+
+# CONFIG_PPC_SMLPAR is not set
+
+CONFIG_MGCOGE=y
+CONFIG_GEF_SBC610=y
+CONFIG_GEF_PPC9A=y
+CONFIG_GEF_SBC310=y
+
+CONFIG_QUICC_ENGINE=y
+CONFIG_QE_GPIO=y
+CONFIG_MPC8xxx_GPIO=y
+
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+CONFIG_IDE_GD_ATAPI=y
+
+CONFIG_MCU_MPC8349EMITX=m
+
+CONFIG_GPIO_XILINX=y
+
+CONFIG_UCB1400_CORE=m
+
+CONFIG_PMIC_DA903X=y
+CONFIG_BACKLIGHT_DA903X=m
+CONFIG_LEDS_DA903X=m
+
+CONFIG_MSI_BITMAP_SELFTEST=y
+
+CONFIG_RELOCATABLE=y
+
+# CONFIG_HVC_UDBG is not set
+CONFIG_PRINT_STACK_DEPTH=64
+
+CONFIG_BATTERY_DA9030=m
+# CONFIG_TWL4030_CORE is not set
+
+CONFIG_BLK_DEV_IT8172=m
+CONFIG_TOUCHSCREEN_DA9034=m
+
+CONFIG_SIMPLE_GPIO=y
+
+CONFIG_FSL_PQ_MDIO=m
+
+CONFIG_PS3_VRAM=m
+CONFIG_MDIO_GPIO=m
+CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL=m
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCF857X=m
+CONFIG_USB_GPIO_VBUS=m
+
+CONFIG_USB_FHCI_HCD=m
+# CONFIG_FHCI_DEBUG is not set
+
+# CONFIG_DRM_RADEON_KMS is not set
+
+# CONFIG_AMIGAONE is not set
+
+CONFIG_PPC_OF_BOOT_TRAMPOLINE=y
+
+CONFIG_DTL=y
+
+CONFIG_MMC_SDHCI_OF=m
+
+# CONFIG_CONSISTENT_SIZE_BOOL is not set
+
+CONFIG_CAN_SJA1000_OF_PLATFORM=m
+
+CONFIG_PPC_EMULATED_STATS=y
+
+CONFIG_SWIOTLB=y
+
+# CONFIG_RDS is not set
+
+CONFIG_PPC_DISABLE_WERROR=y
+
+CONFIG_XILINX_EMACLITE=m
+
+CONFIG_GPIO_WM831X=m
+# CONFIG_GPIO_LANGWELL is not set
+# CONFIG_GPIO_UCB1400 is not set
+CONFIG_EDAC_MPC85XX=m
+
diff --git a/freed-ora/current/F-12/config-powerpc32-generic b/freed-ora/current/F-12/config-powerpc32-generic
new file mode 100644
index 000000000..39f9f745d
--- /dev/null
+++ b/freed-ora/current/F-12/config-powerpc32-generic
@@ -0,0 +1,184 @@
+# CONFIG_SMP is not set
+CONFIG_PPC32=y
+# CONFIG_PPC64 is not set
+# CONFIG_RTAS_PROC is not set
+# CONFIG_PCMCIA_M8XX is not set
+# CONFIG_HOTPLUG_PCI is not set
+CONFIG_CPU_FREQ_PMAC=y
+CONFIG_PPC_CHRP=y
+CONFIG_PPC_PMAC=y
+CONFIG_PPC_MPC52xx=y
+CONFIG_PPC_PREP=y
+
+# CONFIG_PPC_MPC5200_SIMPLE is not set
+CONFIG_SATA_FSL=m
+# CONFIG_SATA_NV is not set
+
+# busted in .28git1
+# ERROR: "cacheable_memzero" [drivers/net/gianfar_driver.ko] undefined!
+# CONFIG_GIANFAR is not set
+CONFIG_USB_EHCI_FSL=y
+
+CONFIG_PMAC_APM_EMU=y
+CONFIG_PMAC_BACKLIGHT=y
+
+CONFIG_HIGHMEM=y
+# CONFIG_HIGHMEM_START_BOOL is not set
+# CONFIG_LOWMEM_SIZE_BOOL is not set
+# CONFIG_TASK_SIZE_BOOL is not set
+# CONFIG_KERNEL_START_BOOL is not set
+# CONFIG_PPC601_SYNC_FIX is not set
+CONFIG_ADVANCED_OPTIONS=y
+CONFIG_SCSI_MESH=m
+CONFIG_SCSI_MESH_SYNC_RATE=5
+CONFIG_SCSI_MESH_RESET_DELAY_MS=4000
+
+CONFIG_SCSI_MAC53C94=m
+CONFIG_ADB_CUDA=y
+CONFIG_ADB_MACIO=y
+CONFIG_INPUT_ADBHID=y
+CONFIG_ADB_PMU_LED=y
+CONFIG_ADB_PMU_LED_IDE=y
+
+CONFIG_PMAC_MEDIABAY=y
+CONFIG_BMAC=m
+CONFIG_MACE=m
+# CONFIG_MACE_AAUI_PORT is not set
+CONFIG_MV643XX_ETH=m
+CONFIG_I2C_HYDRA=m
+CONFIG_I2C_MPC=m
+CONFIG_THERM_WINDTUNNEL=m
+CONFIG_THERM_ADT746X=m
+# CONFIG_ANSLCD is not set
+
+CONFIG_FB_PLATINUM=y
+CONFIG_FB_VALKYRIE=y
+CONFIG_FB_CT65550=y
+# CONFIG_BDI_SWITCH is not set
+
+CONFIG_MAC_FLOPPY=m
+# CONFIG_BLK_DEV_FD is not set
+
+CONFIG_FB_ATY128=y
+CONFIG_FB_ATY=y
+CONFIG_FB_MATROX=y
+# CONFIG_KEXEC is not set
+
+# CONFIG_HVC_RTAS is not set
+
+# CONFIG_UDBG_RTAS_CONSOLE is not set
+CONFIG_BRIQ_PANEL=m
+
+# CONFIG_ATA_PIIX is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ATIIXP is not set
+CONFIG_PATA_MPC52xx=m
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_SERVERWORKS is not set
+
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+# CONFIG_MPC5200_WDT is not set
+CONFIG_8xxx_WDT=m
+CONFIG_GEF_WDT=m
+
+CONFIG_PPC_MPC5200_BUGFIX=y
+CONFIG_FEC_MPC52xx=m
+# CHECK: This may later become a tristate.
+CONFIG_FEC_MPC52xx_MDIO=y
+CONFIG_PPC_MPC5200_GPIO=y
+CONFIG_MDIO_GPIO=m
+
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+
+# CONFIG_EMBEDDED6xx is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+# CONFIG_BLK_DEV_PLATFORM is not set
+# CONFIG_BLK_DEV_4DRIVES is not set
+# CONFIG_BLK_DEV_ALI14XX is not set
+# CONFIG_BLK_DEV_DTC2278 is not set
+# CONFIG_BLK_DEV_HT6560B is not set
+# CONFIG_BLK_DEV_QD65XX is not set
+# CONFIG_BLK_DEV_UMC8672 is not set
+
+# CONFIG_VIRQ_DEBUG is not set
+
+CONFIG_PPC_BESTCOMM_ATA=m
+CONFIG_PPC_BESTCOMM_FEC=m
+CONFIG_PPC_BESTCOMM_GEN_BD=m
+
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_PAGE_OFFSET_BOOL is not set
+# CONFIG_FB_FSL_DIU is not set
+CONFIG_IRQSTACKS=y
+CONFIG_VIRTUALIZATION=y
+
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_HTC_EGPIO is not set
+
+# CONFIG_TIFM_CORE is not set
+
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_CISS_SCSI_TAPE is not set
+
+# CONFIG_I2C_NFORCE2 is not set
+
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+
+# CONFIG_MEMSTICK is not set
+
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_TCG_TPM is not set
+
+# PPC gets sad with debug alloc (bz 448598)
+# CONFIG_DEBUG_PAGEALLOC is not set
+
+CONFIG_SND_ISA=y
+CONFIG_CRYPTO_DEV_TALITOS=m
+
+CONFIG_FSL_EMB_PERFMON=y
+CONFIG_MPC8272_ADS=y
+CONFIG_PQ2FADS=y
+CONFIG_EP8248E=y
+CONFIG_MPC831x_RDB=y
+CONFIG_MPC832x_MDS=y
+CONFIG_MPC832x_RDB=y
+CONFIG_MPC834x_MDS=y
+CONFIG_MPC834x_ITX=y
+CONFIG_MPC836x_MDS=y
+CONFIG_MPC836x_RDK=y
+CONFIG_MPC837x_MDS=y
+CONFIG_MPC837x_RDB=y
+CONFIG_SBC834x=y
+CONFIG_ASP834x=y
+CONFIG_KMETER1=y
+CONFIG_MPC8641_HPCN=y
+CONFIG_SBC8641D=y
+CONFIG_MPC8610_HPCD=y
+
+# CONFIG_USB_MUSB_HDRC is not set
+
+# busted in 2.6.27
+# drivers/mtd/maps/sbc8240.c: In function 'init_sbc8240_mtd':
+# drivers/mtd/maps/sbc8240.c:172: warning: passing argument 1 of 'simple_map_init' from incompatible pointer type
+# drivers/mtd/maps/sbc8240.c:177: error: 'struct mtd_info' has no member named 'module'
+
+CONFIG_MTD_NAND_FSL_UPM=m
+
+CONFIG_USB_GPIO_VBUS=m
+
+CONFIG_RCU_FANOUT=32
+
+CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+
diff --git a/freed-ora/current/F-12/config-powerpc32-smp b/freed-ora/current/F-12/config-powerpc32-smp
new file mode 100644
index 000000000..e60f59cdf
--- /dev/null
+++ b/freed-ora/current/F-12/config-powerpc32-smp
@@ -0,0 +1,4 @@
+CONFIG_SMP=y
+# CONFIG_HOTPLUG_CPU is not set
+CONFIG_NR_CPUS=4
+# CONFIG_BATTERY_PMU is not set
diff --git a/freed-ora/current/F-12/config-powerpc64 b/freed-ora/current/F-12/config-powerpc64
new file mode 100644
index 000000000..d9f7983db
--- /dev/null
+++ b/freed-ora/current/F-12/config-powerpc64
@@ -0,0 +1,176 @@
+CONFIG_WINDFARM_PM81=y
+CONFIG_WINDFARM_PM91=y
+CONFIG_WINDFARM_PM121=y
+CONFIG_PPC_PMAC64=y
+CONFIG_PPC_MAPLE=y
+CONFIG_PPC_CELL=y
+CONFIG_PPC_IBM_CELL_BLADE=y
+CONFIG_PPC_ISERIES=y
+CONFIG_PPC_PSERIES=y
+CONFIG_PPC_PMAC=y
+CONFIG_PPC_PASEMI=y
+# CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is not set
+CONFIG_PPC_PS3=y
+CONFIG_PPC_CELLEB=y
+CONFIG_PPC_CELL_QPACE=y
+CONFIG_PS3_HTAB_SIZE=20
+# CONFIG_PS3_DYNAMIC_DMA is not set
+CONFIG_PS3_ADVANCED=y
+CONFIG_PS3_HTAB_SIZE=20
+# CONFIG_PS3_DYNAMIC_DMA is not set
+CONFIG_PS3_VUART=y
+CONFIG_PS3_PS3AV=y
+CONFIG_PS3_STORAGE=m
+CONFIG_PS3_DISK=m
+CONFIG_PS3_ROM=m
+CONFIG_PS3_FLASH=m
+CONFIG_PS3_LPM=y
+CONFIG_SND_PS3=m
+CONFIG_SND_PS3_DEFAULT_START_DELAY=1000
+CONFIG_GELIC_NET=m
+CONFIG_GELIC_WIRELESS=y
+CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y
+CONFIG_CBE_THERM=m
+CONFIG_CBE_CPUFREQ=m
+CONFIG_CBE_CPUFREQ_PMI=m
+CONFIG_CBE_CPUFREQ_PMI_ENABLE=y
+CONFIG_PMAC_RACKMETER=m
+CONFIG_IBMEBUS=y
+CONFIG_SPU_FS=m
+CONFIG_RTAS_FLASH=y
+CONFIG_PPC_SPLPAR=y
+CONFIG_SCANLOG=y
+CONFIG_LPARCFG=y
+CONFIG_SERIAL_ICOM=m
+CONFIG_HVCS=m
+CONFIG_HVC_CONSOLE=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_THERM_PM72=y
+CONFIG_IBMVETH=m
+CONFIG_SCSI_IBMVSCSI=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+CONFIG_HOTPLUG_PCI_SHPC=m
+CONFIG_HOTPLUG_PCI_RPA=m
+CONFIG_HOTPLUG_PCI_RPA_DLPAR=y
+CONFIG_ADB_PMU_LED=y
+CONFIG_ADB_PMU_LED_IDE=y
+CONFIG_PMAC_SMU=y
+CONFIG_CPU_FREQ_PMAC64=y
+CONFIG_SCSI_IPR=m
+CONFIG_SCSI_IPR_TRACE=y
+CONFIG_SCSI_IPR_DUMP=y
+CONFIG_SPIDER_NET=m
+CONFIG_HVC_RTAS=y
+CONFIG_HVC_ISERIES=y
+CONFIG_CBE_RAS=y
+
+# iSeries device drivers
+#
+CONFIG_ISERIES_VETH=m
+CONFIG_VIODASD=m
+CONFIG_VIOCD=m
+CONFIG_VIOTAPE=m
+
+CONFIG_PASEMI_MAC=m
+CONFIG_SERIAL_OF_PLATFORM=m
+
+CONFIG_PPC_PASEMI_IOMMU=y
+CONFIG_SERIAL_TXX9=y
+CONFIG_SERIAL_TXX9_NR_UARTS=6
+CONFIG_SERIAL_TXX9_CONSOLE=y
+
+CONFIG_HVC_BEAT=y
+
+CONFIG_FB_PS3=y
+CONFIG_FB_PS3_DEFAULT_SIZE_M=18
+
+CONFIG_PPC_PMI=m
+CONFIG_PS3_SYS_MANAGER=y
+# CONFIG_BLK_DEV_CELLEB is not set
+
+CONFIG_PATA_SCC=m
+
+CONFIG_APM_EMULATION=m
+
+CONFIG_PPC64=y
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_NR_CPUS=128
+# CONFIG_FB_PLATINUM is not set
+# CONFIG_FB_VALKYRIE is not set
+# CONFIG_FB_CT65550 is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+
+# CONFIG_POWER4_ONLY is not set
+
+CONFIG_RTAS_PROC=y
+CONFIG_IOMMU_VMERGE=y
+CONFIG_NUMA=y
+# CONFIG_PPC_64K_PAGES is not set
+CONFIG_SCHED_SMT=y
+
+# CONFIG_MV643XX_ETH is not set
+CONFIG_IRQSTACKS=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_INPUT_PCSPKR is not set
+
+CONFIG_EHEA=m
+CONFIG_INFINIBAND_EHCA=m
+
+CONFIG_HCALL_STATS=y
+
+CONFIG_XMON_DISASSEMBLY=y
+
+CONFIG_SCSI_IBMVSCSIS=m
+
+CONFIG_SECCOMP=y
+
+CONFIG_TUNE_CELL=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_BLK_DEV_PLATFORM is not set
+CONFIG_IBM_NEW_EMAC=m
+CONFIG_IBM_NEW_EMAC_RXB=128
+CONFIG_IBM_NEW_EMAC_TXB=64
+CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
+CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
+CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
+# CONFIG_IBM_NEW_EMAC_DEBUG is not set
+
+# CONFIG_VIRQ_DEBUG is not set
+CONFIG_ELECTRA_CF=m
+
+CONFIG_SPARSEMEM_VMEMMAP=y
+
+CONFIG_MTD_NAND_PASEMI=m
+CONFIG_EDAC_CELL=m
+CONFIG_EDAC_CPC925=m
+CONFIG_FRAME_WARN=2048
+
+CONFIG_PHYP_DUMP=y
+CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_VIRTUALIZATION=y
+
+CONFIG_VSX=y
+
+CONFIG_SCSI_IBMVFC=m
+# CONFIG_SCSI_IBMVFC_TRACE is not set
+CONFIG_IBM_BSR=m
+
+CONFIG_SERIO_XILINX_XPS_PS2=m
+
+CONFIG_PPC_IBM_CELL_RESETBUTTON=y
+CONFIG_PPC_IBM_CELL_POWERBUTTON=m
+CONFIG_CBE_CPUFREQ_SPU_GOVERNOR=m
+
+CONFIG_RTC_DRV_PS3=y
+
+CONFIG_CRASH_DUMP=y
+CONFIG_RELOCATABLE=y
+
+CONFIG_RCU_FANOUT=64
+
+CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
diff --git a/freed-ora/current/F-12/config-rhel-generic b/freed-ora/current/F-12/config-rhel-generic
new file mode 100644
index 000000000..197ae8378
--- /dev/null
+++ b/freed-ora/current/F-12/config-rhel-generic
@@ -0,0 +1,204 @@
+# CONFIG_ISA is not set
+# CONFIG_ISAPNP is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_DC390T is not set
+
+# CONFIG_ATALK is not set
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_LTPC is not set
+# CONFIG_COPS is not set
+# CONFIG_IPX is not set
+# CONFIG_IPDDP is not set
+# CONFIG_DECNET is not set
+# CONFIG_PLIP is not set
+
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_WD80x3 is not set
+# CONFIG_I82092 is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_ULTRA is not set
+# CONFIG_SKFP is not set
+# CONFIG_DE600 is not set
+# CONFIG_DE620 is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_AC3200 is not set
+# CONFIG_NI52 is not set
+# CONFIG_NI65 is not set
+# CONFIG_LANCE is not set
+# CONFIG_EL16 is not set
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_HP100 is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_SB1000 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_ATP is not set
+
+# CONFIG_TR is not set
+
+# CONFIG_GAMEPORT is not set
+
+# CONFIG_SND_AD1816A is not set
+# CONFIG_SND_AD1848 is not set
+# CONFIG_SND_CS4231 is not set
+# CONFIG_SND_CS4236 is not set
+# CONFIG_SND_ES968 is not set
+# CONFIG_SND_ES1688 is not set
+# CONFIG_SND_ES18XX is not set
+# CONFIG_SND_GUSCLASSIC is not set
+# CONFIG_SND_GUSEXTREME is not set
+# CONFIG_SND_GUSMAX is not set
+# CONFIG_SND_INTERWAVE is not set
+# CONFIG_SND_INTERWAVE_STB is not set
+# CONFIG_SND_OPTI92X_AD1848 is not set
+# CONFIG_SND_OPTI92X_CS4231 is not set
+# CONFIG_SND_OPTI93X is not set
+# CONFIG_SND_MIRO is not set
+# CONFIG_SND_SB8 is not set
+# CONFIG_SND_SB16 is not set
+# CONFIG_SND_SBAWE is not set
+# CONFIG_SND_SB16_CSP is not set
+# CONFIG_SND_WAVEFRONT is not set
+# CONFIG_SND_ALS100 is not set
+# CONFIG_SND_AZT2320 is not set
+# CONFIG_SND_CMI8330 is not set
+# CONFIG_SND_DT019X is not set
+# CONFIG_SND_OPL3SA2 is not set
+# CONFIG_SND_SGALAXY is not set
+# CONFIG_SND_SSCAPE is not set
+
+# CONFIG_WAN_ROUTER is not set
+
+# CONFIG_BINFMT_AOUT is not set
+
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_SIS is not set
+
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+
+# CONFIG_PARIDE is not set
+
+# CONFIG_I2O is not set
+
+# CONFIG_MWAVE is not set
+
+# CONFIG_ROCKETPORT is not set
+# CONFIG_R3964 is not set
+
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_DB9 is not set
+# CONFIG_JOYSTICK_GAMECON is not set
+# CONFIG_JOYSTICK_TURBOGRAFX is not set
+
+# CONFIG_RADIO_CADET is not set
+# CONFIG_RADIO_RTRACK is not set
+# CONFIG_RADIO_RTRACK2 is not set
+# CONFIG_RADIO_AZTECH is not set
+# CONFIG_RADIO_GEMTEK is not set
+# CONFIG_RADIO_GEMTEK_PCI is not set
+# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_MAESTRO is not set
+# CONFIG_RADIO_SF16FMI is not set
+# CONFIG_RADIO_SF16FMR2 is not set
+# CONFIG_RADIO_TERRATEC is not set
+# CONFIG_RADIO_TRUST is not set
+# CONFIG_RADIO_TYPHOON is not set
+# CONFIG_RADIO_ZOLTRIX is not set
+
+
+# CONFIG_VIDEO_PMS is not set
+# CONFIG_VIDEO_BWQCAM is not set
+# CONFIG_VIDEO_CQCAM is not set
+# CONFIG_VIDEO_W9966 is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA_PP is not set
+# CONFIG_VIDEO_CPIA_USB is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_STRADIS is not set
+# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_VIDEO_ZORAN_BUZ is not set
+# CONFIG_VIDEO_ZORAN_DC10 is not set
+# CONFIG_VIDEO_ZORAN_DC30 is not set
+# CONFIG_VIDEO_ZORAN_LML33 is not set
+# CONFIG_VIDEO_ZORAN_LML33R10 is not set
+# CONFIG_VIDEO_MEYE is not set
+# CONFIG_VIDEO_SAA7134 is not set
+# CONFIG_VIDEO_MXB is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_CX88 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+
+# CONFIG_INFTL is not set
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PCI is not set
+
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_HGA_ACCEL is not set
+# CONFIG_FB_3DFX_ACCEL is not set
+
+# CONFIG_JFS_FS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_9P_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+
diff --git a/freed-ora/current/F-12/config-s390x b/freed-ora/current/F-12/config-s390x
new file mode 100644
index 000000000..632983bff
--- /dev/null
+++ b/freed-ora/current/F-12/config-s390x
@@ -0,0 +1,227 @@
+CONFIG_64BIT=y
+# CONFIG_MARCH_G5 is not set
+# CONFIG_MARCH_Z900 is not set
+CONFIG_MARCH_Z9_109=y
+# CONFIG_MARCH_Z990 is not set
+
+CONFIG_NR_CPUS=64
+CONFIG_COMPAT=y
+
+# See bug 496596
+CONFIG_HZ_100=y
+# See bug 496605
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+
+CONFIG_MMU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NO_IDLE_HZ=y
+
+CONFIG_SMP=y
+
+#
+# I/O subsystem configuration
+#
+CONFIG_QDIO=m
+
+#
+# Misc
+#
+CONFIG_IPL=y
+# CONFIG_IPL_TAPE is not set
+CONFIG_IPL_VM=y
+# CONFIG_PROCESS_DEBUG is not set
+CONFIG_PFAULT=y
+CONFIG_SHARED_KERNEL=y
+CONFIG_CMM=m
+CONFIG_CMM_PROC=y
+CONFIG_NETIUCV=m
+CONFIG_SMSGIUCV=m
+
+#
+# SCSI low-level drivers
+#
+CONFIG_ZFCP=m
+CONFIG_ZFCPDUMP=y
+CONFIG_CCW=y
+
+#
+# S/390 block device drivers
+#
+CONFIG_DCSSBLK=m
+CONFIG_BLK_DEV_XPRAM=m
+CONFIG_DASD=m
+CONFIG_DASD_PROFILE=y
+CONFIG_DASD_ECKD=m
+CONFIG_DASD_FBA=m
+CONFIG_DASD_DIAG=m
+CONFIG_DASD_EER=y
+
+#
+# S/390 character device drivers
+#
+CONFIG_TN3270=y
+CONFIG_TN3270_CONSOLE=y
+CONFIG_TN3215=y
+CONFIG_TN3215_CONSOLE=y
+CONFIG_CCW_CONSOLE=y
+CONFIG_SCLP_TTY=y
+CONFIG_SCLP_CONSOLE=y
+CONFIG_SCLP_VT220_TTY=y
+CONFIG_SCLP_VT220_CONSOLE=y
+CONFIG_SCLP_CPI=m
+CONFIG_SCLP_ASYNC=m
+CONFIG_S390_TAPE=m
+CONFIG_S390_TAPE_3590=m
+
+CONFIG_APPLDATA_BASE=y
+CONFIG_APPLDATA_MEM=m
+CONFIG_APPLDATA_OS=m
+CONFIG_APPLDATA_NET_SUM=m
+CONFIG_TN3270_TTY=y
+CONFIG_TN3270_FS=m
+
+
+#
+# S/390 tape interface support
+#
+CONFIG_S390_TAPE_BLOCK=y
+
+#
+# S/390 tape hardware support
+#
+CONFIG_S390_TAPE_34XX=m
+
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# Token Ring devices
+#
+CONFIG_TR=y
+CONFIG_NETCONSOLE=m
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# S/390 network device drivers
+#
+CONFIG_LCS=m
+CONFIG_CTC=m
+CONFIG_IUCV=m
+CONFIG_QETH=m
+CONFIG_QETH_IPV6=y
+CONFIG_CCWGROUP=m
+
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_B44 is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_OSF_PARTITION is not set
+CONFIG_IBM_PARTITION=y
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+
+
+#
+# S390 crypto hw
+#
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+
+#
+# Kernel hacking
+#
+
+#
+# S390 specific stack options; needs gcc 3.5 so off for now
+#
+CONFIG_PACK_STACK=y
+CONFIG_CHECK_STACK=y
+# CONFIG_WARN_STACK is not set
+# CONFIG_SMALL_STACK is not set
+
+CONFIG_ZVM_WATCHDOG=m
+CONFIG_VMLOGRDR=m
+CONFIG_MONREADER=m
+
+CONFIG_VIRT_CPU_ACCOUNTING=y
+
+# CONFIG_CLAW is not set
+
+CONFIG_VMCP=m
+
+# CONFIG_ATMEL is not set
+
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MII is not set
+
+
+CONFIG_STACK_GUARD=256
+CONFIG_CMM_IUCV=y
+
+# CONFIG_DETECT_SOFTLOCKUP is not set
+
+CONFIG_S390_HYPFS_FS=y
+
+CONFIG_MONWRITER=m
+CONFIG_ZCRYPT=m
+CONFIG_ZCRYPT_MONOLITHIC=y
+
+CONFIG_S390_SWITCH_AMODE=y
+CONFIG_S390_EXEC_PROTECT=y
+CONFIG_AFIUCV=m
+CONFIG_S390_PRNG=m
+
+CONFIG_S390_VMUR=m
+
+# CONFIG_THERMAL is not set
+
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CTCM=m
+CONFIG_QETH_L2=m
+CONFIG_QETH_L3=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_S390_GUEST=y
+
+
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CHSC_SCH=m
+
+# drivers/isdn/hardware/mISDN/hfcmulti.c:5255:2: error: #error "not running on big endian machines now"
+# CONFIG_MISDN_HFCMULTI is not set
+
+CONFIG_HVC_IUCV=y
+
+CONFIG_RCU_FANOUT=64
+
+CONFIG_SECCOMP=y
+
+CONFIG_PM=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/jokes"
+
+CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+
diff --git a/freed-ora/current/F-12/config-sparc64-generic b/freed-ora/current/F-12/config-sparc64-generic
new file mode 100644
index 000000000..fc09d14fd
--- /dev/null
+++ b/freed-ora/current/F-12/config-sparc64-generic
@@ -0,0 +1,196 @@
+CONFIG_SMP=y
+CONFIG_SPARC=y
+CONFIG_SPARC64=y
+CONFIG_SECCOMP=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=m
+CONFIG_CPU_FREQ_DEBUG=y
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_US3_FREQ=m
+CONFIG_US2E_FREQ=m
+
+CONFIG_SUN_LDOMS=y
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+CONFIG_64BIT=y
+# CONFIG_BBC_I2C is not set
+CONFIG_HUGETLB_PAGE_SIZE_4MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
+CONFIG_NR_CPUS=256
+CONFIG_US3_FREQ=m
+CONFIG_US2E_FREQ=m
+CONFIG_SUN_OPENPROMFS=m
+CONFIG_COMPAT=y
+CONFIG_UID16=y
+CONFIG_BINFMT_ELF32=y
+CONFIG_ENVCTRL=m
+CONFIG_DISPLAY7SEG=m
+CONFIG_WATCHDOG_CP1XXX=m
+CONFIG_WATCHDOG_RIO=m
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_PARPORT is not set
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_LIRC_PARALLEL is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_VOODOO3 is not set
+CONFIG_I2C_ALI1535=m
+# CONFIG_VGASTATE is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BW2 is not set
+CONFIG_FB_CG3=y
+CONFIG_FB_CG6=y
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+CONFIG_FB_ATY=y
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_TRIDENT is not set
+CONFIG_FB_SBUS=y
+CONFIG_FB_FFB=y
+# CONFIG_FB_TCX is not set
+# CONFIG_FB_CG14 is not set
+CONFIG_FB_PM2=y
+CONFIG_FB_P9100=y
+# CONFIG_FB_LEO is not set
+CONFIG_FB_XVR500=y
+CONFIG_FB_XVR2500=y
+# CONFIG_VGASTATE is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_10x18 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_SUN8x16=y
+CONFIG_FONT_SUN12x22=y
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_SERIAL_8250 is not set
+CONFIG_SERIAL_SUNZILOG=y
+CONFIG_SERIAL_SUNZILOG_CONSOLE=y
+CONFIG_SERIAL_SUNSU=y
+CONFIG_SERIAL_SUNSU_CONSOLE=y
+CONFIG_SERIAL_SUNSAB=y
+CONFIG_SERIAL_SUNSAB_CONSOLE=y
+CONFIG_SERIAL_SUNHV=y
+CONFIG_SUN_OPENPROMIO=y
+CONFIG_OBP_FLASH=m
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_BLK_DEV_FD=y
+CONFIG_SUNVDC=m
+CONFIG_SUNVNET=m
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+CONFIG_SCSI_QLOGICPTI=m
+CONFIG_SCSI_SUNESP=m
+CONFIG_SUNLANCE=m
+CONFIG_SUNBMAC=m
+CONFIG_SUNQE=m
+# CONFIG_DM9102 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_R8169 is not set
+CONFIG_ATM_FORE200E_USE_TASKLET=y
+CONFIG_ATM_FORE200E_DEBUG=0
+CONFIG_ATM_FORE200E_TX_RETRY=16
+# CONFIG_DRM_TDFX is not set
+CONFIG_KEYBOARD_ATKBD=y
+CONFIG_KEYBOARD_SUNKBD=y
+# CONFIG_INPUT_PCSPKR is not set
+CONFIG_INPUT_SPARCSPKR=m
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_SND_SUN_AMD7930 is not set
+CONFIG_SND_SUN_CS4231=m
+# CONFIG_SND_SUN_DBRI is not set
+CONFIG_PARPORT_SUNBPP=m
+CONFIG_LOGO_SUN_CLUT224=y
+CONFIG_MTD_SUN_UFLASH=m
+CONFIG_MYRI_SBUS=m
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_VIDEO_STRADIS is not set
+# CONFIG_IEEE1394_SBP2 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_DCFLUSH is not set
+# CONFIG_DEBUG_BOOTMEM is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_LOCKDEP is not set
+# CONFIG_STACK_DEBUG is not set
+
+CONFIG_SPARSEMEM_VMEMMAP=y
+
+# CONFIG_THERMAL is not set
+
+CONFIG_FRAME_WARN=2048
+
+CONFIG_NUMA=y
+
+CONFIG_SND_SPARC=y
+
+CONFIG_HW_RANDOM_N2RNG=m
+
+# drivers/isdn/hardware/mISDN/hfcmulti.c:5255:2: error: #error "not running on big endian machines now"
+# CONFIG_MISDN_HFCMULTI is not set
+
+CONFIG_US3_MC=y
+CONFIG_SENSORS_ULTRA45=m
+CONFIG_LEDS_SUNFIRE=m
+CONFIG_TADPOLE_TS102_UCTRL=m
+
+CONFIG_RCU_FANOUT=64
+
+CONFIG_LIRC_ENE0100=m
+# CONFIG_BATTERY_DS2782 is not set
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SN9C20X_EVDEV=y
+CONFIG_LSM_MMAP_MIN_ADDR=65536
+
+CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
diff --git a/freed-ora/current/F-12/config-x86-generic b/freed-ora/current/F-12/config-x86-generic
new file mode 100644
index 000000000..e0f1d1ebc
--- /dev/null
+++ b/freed-ora/current/F-12/config-x86-generic
@@ -0,0 +1,476 @@
+CONFIG_UID16=y
+# CONFIG_64BIT is not set
+# CONFIG_KERNEL_LZMA is not set
+
+#
+# Processor type and features
+#
+#
+# Enable summit and co via the generic arch
+#
+CONFIG_X86_EXTENDED_PLATFORM=y
+CONFIG_X86_32_NON_STANDARD=y
+
+# CONFIG_X86_ELAN is not set
+# CONFIG_X86_NUMAQ is not set
+# CONFIG_X86_SUMMIT is not set
+CONFIG_X86_BIGSMP=y
+# CONFIG_X86_VISWS is not set
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_ES7000 is not set
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+CONFIG_M686=y
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=32
+CONFIG_X86_GENERIC=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_PPRO_FENCE=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_HPET=y
+CONFIG_HPET_TIMER=y
+# CONFIG_HPET_MMAP is not set
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+CONFIG_X86_TSC=y
+CONFIG_X86_MCE=y
+CONFIG_TOSHIBA=m
+CONFIG_I8K=m
+CONFIG_SONYPI=m
+CONFIG_SONYPI_COMPAT=y
+CONFIG_MICROCODE=m
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_X86_CPU_DEBUG is not set
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
+# CONFIG_NUMA is not set
+
+# CONFIG_NOHIGHMEM is not set
+CONFIG_HIGHMEM4G=y
+# CONFIG_HIGHMEM64G is not set
+CONFIG_HIGHMEM=y
+CONFIG_HIGHPTE=y
+
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+CONFIG_X86_PAT=y
+CONFIG_X86_PM_TIMER=y
+
+CONFIG_EFI=y
+CONFIG_EFI_VARS=y
+CONFIG_EFI_PCDP=y
+CONFIG_FB_EFI=y
+# CONFIG_FB_N411 is not set
+
+CONFIG_DMAR=y
+CONFIG_DMAR_BROKEN_GFX_WA=y
+CONFIG_DMAR_FLOPPY_WA=y
+CONFIG_DMAR_DEFAULT_ON=y
+
+CONFIG_FB_GEODE=y
+CONFIG_FB_GEODE_LX=y
+CONFIG_FB_GEODE_GX=y
+# CONFIG_FB_GEODE_GX1 is not set
+
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GODIRECT is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+CONFIG_PCI_GOANY=y
+
+#
+# x86 specific drivers
+#
+CONFIG_PCMCIA_FDOMAIN=m
+CONFIG_SCSI_FUTURE_DOMAIN=m
+CONFIG_SCSI_ADVANSYS=m
+
+CONFIG_CC_STACKPROTECTOR=y
+
+CONFIG_SECCOMP=y
+
+CONFIG_CAPI_EICON=y
+
+CONFIG_I2O=m
+CONFIG_I2O_BLOCK=m
+CONFIG_I2O_SCSI=m
+CONFIG_I2O_PROC=m
+CONFIG_I2O_CONFIG=y
+CONFIG_I2O_EXT_ADAPTEC=y
+CONFIG_I2O_EXT_ADAPTEC_DMA64=y
+CONFIG_I2O_CONFIG_OLD_IOCTL=y
+CONFIG_I2O_BUS=m
+
+#
+# APM (Advanced Power Management) BIOS Support
+#
+CONFIG_APM=y
+# CONFIG_APM_IGNORE_USER_SUSPEND is not set
+# CONFIG_APM_DO_ENABLE is not set
+CONFIG_APM_CPU_IDLE=y
+# CONFIG_APM_DISPLAY_BLANK is not set
+# CONFIG_APM_ALLOW_INTS is not set
+
+#
+# Kernel debugging
+#
+CONFIG_X86_MPPARSE=y
+
+CONFIG_ACPI=y
+CONFIG_ACPI_AC=y
+# CONFIG_ACPI_ASUS is not set
+CONFIG_ACPI_PROCFS_POWER=y
+CONFIG_ACPI_SYSFS_POWER=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BLACKLIST_YEAR=1999
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_CONTAINER=m
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_NUMA=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_POWER=y
+CONFIG_ACPI_PROCFS=y
+CONFIG_ACPI_SBS=m
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_THERMAL=y
+CONFIG_TOPSTAR_LAPTOP=m
+CONFIG_ACPI_TOSHIBA=m
+CONFIG_ACPI_VIDEO=m
+# Disable in F9.
+CONFIG_ACPI_PROC_EVENT=y
+CONFIG_PNPACPI=y
+CONFIG_ACPI_POWER_METER=m
+CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
+
+#
+# CPUFreq processor drivers
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEBUG=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+
+CONFIG_X86_ACPI_CPUFREQ=m
+# CONFIG_X86_POWERNOW_K6 is not set
+CONFIG_X86_POWERNOW_K7=y
+CONFIG_X86_POWERNOW_K8=m
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_SPEEDSTEP_ICH=y
+CONFIG_X86_SPEEDSTEP_SMI=y
+CONFIG_X86_SPEEDSTEP_LIB=y
+# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set
+CONFIG_X86_P4_CLOCKMOD=m
+CONFIG_X86_LONGRUN=y
+# CONFIG_X86_LONGHAUL is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# e_powersaver is dangerous
+# CONFIG_X86_E_POWERSAVER is not set
+
+CONFIG_X86_HT=y
+CONFIG_X86_TRAMPOLINE=y
+
+#
+# various x86 specific drivers
+#
+CONFIG_NVRAM=y
+CONFIG_IBM_ASM=m
+CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_TWOFISH_586=m
+CONFIG_CRYPTO_DEV_PADLOCK=m
+CONFIG_CRYPTO_DEV_PADLOCK_AES=m
+CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
+
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_SCHED_SMT=y
+CONFIG_SUSPEND=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
+
+CONFIG_DEBUG_RODATA=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_4KSTACKS is not set
+CONFIG_DEBUG_NMI_TIMEOUT=5
+
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_BIOS=y
+
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_COMPAQ=m
+# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set
+CONFIG_HOTPLUG_PCI_IBM=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+# SHPC has half-arsed PCI probing, which makes it load on too many systems
+# CONFIG_HOTPLUG_PCI_SHPC is not set
+CONFIG_PM=y
+
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_IPW2200=m
+CONFIG_IPW2200_MONITOR=y
+CONFIG_IPW2200_RADIOTAP=y
+CONFIG_IPW2200_PROMISCUOUS=y
+CONFIG_IPW2200_QOS=y
+
+CONFIG_BLK_DEV_AMD74XX=y
+
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_NFORCE2_S4985=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_VOODOO3=m
+
+CONFIG_SCx200_ACB=m
+
+# CONFIG_X86_REBOOTFIXUPS is not set
+
+CONFIG_DELL_RBU=m
+CONFIG_DCDBAS=m
+
+CONFIG_PC8736x_GPIO=m
+# CONFIG_NSC_GPIO is not set
+CONFIG_CS5535_GPIO=m
+
+CONFIG_EDAC=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_MM_EDAC=m
+CONFIG_EDAC_AMD76X=m
+CONFIG_EDAC_E7XXX=m
+CONFIG_EDAC_E752X=m
+CONFIG_EDAC_I82860=m
+CONFIG_EDAC_I82875P=m
+CONFIG_EDAC_I82975X=m
+CONFIG_EDAC_I3000=m
+CONFIG_EDAC_I5000=m
+CONFIG_EDAC_I5100=m
+CONFIG_EDAC_I5400=m
+CONFIG_EDAC_R82600=m
+CONFIG_EDAC_AMD8131=m
+CONFIG_EDAC_AMD8111=m
+
+CONFIG_SCHED_MC=y
+
+CONFIG_SND_ISA=y
+CONFIG_SND_ES18XX=m
+
+CONFIG_TCG_INFINEON=m
+
+CONFIG_HW_RANDOM_INTEL=m
+CONFIG_HW_RANDOM_AMD=m
+CONFIG_HW_RANDOM_GEODE=m
+CONFIG_HW_RANDOM_VIA=m
+
+
+# CONFIG_COMPAT_VDSO is not set
+
+# CONFIG_SGI_IOC4 is not set
+
+CONFIG_X86_PLATFORM_DEVICES=y
+CONFIG_ASUS_LAPTOP=m
+CONFIG_COMPAL_LAPTOP=m
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_FUJITSU_LAPTOP=m
+# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
+CONFIG_MSI_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+CONFIG_DELL_LAPTOP=m
+CONFIG_ACPI_WMI=m
+CONFIG_ACER_WMI=m
+CONFIG_TC1100_WMI=m
+CONFIG_HP_WMI=m
+CONFIG_DELL_WMI=m
+
+# CONFIG_SMSC37B787_WDT is not set
+CONFIG_W83697HF_WDT=m
+CONFIG_IB700_WDT=m
+
+CONFIG_RELOCATABLE=y
+CONFIG_PHYSICAL_ALIGN=0x400000
+CONFIG_PHYSICAL_START=0x400000
+CONFIG_CRASH_DUMP=y
+# CONFIG_KEXEC_JUMP is not set
+CONFIG_PROC_VMCORE=y
+CONFIG_CRASH=m
+
+CONFIG_CRYPTO_DEV_GEODE=m
+
+CONFIG_VIDEO_CAFE_CCIC=m
+
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+CONFIG_KVM_AMD=m
+CONFIG_LGUEST=m
+
+CONFIG_PARAVIRT_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_DEBUG is not set
+
+# PARAVIRT_SPINLOCKS has a 5% perf hit
+# CONFIG_PARAVIRT_SPINLOCKS is not set
+CONFIG_KVM_CLOCK=y
+CONFIG_KVM_GUEST=y
+CONFIG_LGUEST_GUEST=y
+CONFIG_VMI=y
+
+CONFIG_XEN=y
+CONFIG_XEN_MAX_DOMAIN_MEMORY=8
+CONFIG_XEN_BALLOON=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_SAVE_RESTORE=y
+CONFIG_HVC_XEN=y
+CONFIG_XEN_FBDEV_FRONTEND=y
+CONFIG_XEN_KBDDEV_FRONTEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_XEN_NETDEV_FRONTEND=m
+CONFIG_XENFS=m
+CONFIG_XEN_COMPAT_XENFS=y
+
+CONFIG_MTD_ESB2ROM=m
+CONFIG_MTD_CK804XROM=m
+CONFIG_MTD_NAND_CAFE=m
+
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_GOV_LADDER is not set
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+CONFIG_THINKPAD_ACPI=m
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+CONFIG_THINKPAD_ACPI_VIDEO=y
+# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
+
+CONFIG_MACINTOSH_DRIVERS=y
+
+CONFIG_DMIID=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_IBFT=m
+
+CONFIG_DMADEVICES=y
+CONFIG_INTEL_IOATDMA=m
+
+CONFIG_SENSORS_I5K_AMB=m
+
+# CONFIG_CPA_DEBUG is not set
+# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+
+CONFIG_HP_WATCHDOG=m
+
+CONFIG_OLPC=y
+CONFIG_BATTERY_OLPC=y
+CONFIG_MOUSE_PS2_OLPC=y
+
+CONFIG_STRICT_DEVMEM=y
+
+# CONFIG_MEMTEST is not set
+# CONFIG_MAXSMP is not set
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_SYSPROF_TRACER=y
+
+# CONFIG_X86_VERBOSE_BOOTUP is not set
+# CONFIG_MMIOTRACE_TEST is not set
+
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+
+CONFIG_HP_ILO=m
+
+CONFIG_BACKLIGHT_MBP_NVIDIA=m
+
+CONFIG_OPROFILE_IBS=y
+CONFIG_MICROCODE_INTEL=y
+CONFIG_MICROCODE_AMD=y
+
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW_64K=y
+
+# CONFIG_CMDLINE_BOOL is not set
+
+CONFIG_PANASONIC_LAPTOP=m
+
+CONFIG_XEN_DEBUG_FS=y
+CONFIG_X86_PTRACE_BTS=y
+
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+
+CONFIG_POWER_TRACER=y
+CONFIG_HW_BRANCH_TRACER=y
+
+# CONFIG_SPARSE_IRQ is not set
+
+CONFIG_RCU_FANOUT=32
+
+# CONFIG_IOMMU_STRESS is not set
+
+CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
+# CONFIG_X86_ANCIENT_MCE is not set
+# CONFIG_X86_MCE_INJECT is not set
+
+# CONFIG_X86_MRST is not set
+CONFIG_SFI=y
+
+CONFIG_INPUT_WINBOND_CIR=m
+CONFIG_I2C_SCMI=m
+CONFIG_SBC_FITPC2_WATCHDOG=m
+CONFIG_EDAC_I3200=m
+CONFIG_EDAC_DECODE_MCE=m
+
+CONFIG_GPIO_LANGWELL=y
+
+# CONFIG_INTEL_TXT is not set
+
+CONFIG_ACERHDF=m
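Each symbol in the fragments above is in one of three states: =y (built into the kernel image), =m (built as a module), or "is not set" (compiled out), and that state is all the C code ever sees at build time because kbuild force-includes the generated autoconf.h everywhere. Below is a minimal illustrative sketch of how those three states surface to C on a kernel of this vintage; the demo_* functions are invented for the example and are not part of this config or any patch in this commit.

/* Illustration only; the demo_* names are invented and not part of this tree. */
#ifdef CONFIG_ACPI                                      /* CONFIG_ACPI=y above */
static void demo_acpi_builtin(void) { /* always compiled into vmlinux */ }
#endif

#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)   /* CONFIG_KVM=m above */
static void demo_kvm_aware(void) { /* present whether kvm is built in or a module */ }
#endif

#ifndef CONFIG_NUMA                            /* "# CONFIG_NUMA is not set" above */
static void demo_flat_memory(void) { /* UMA-only fallback path */ }
#endif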
diff --git a/freed-ora/current/F-12/config-x86_64-generic b/freed-ora/current/F-12/config-x86_64-generic
new file mode 100644
index 000000000..175f57b09
--- /dev/null
+++ b/freed-ora/current/F-12/config-x86_64-generic
@@ -0,0 +1,387 @@
+CONFIG_64BIT=y
+CONFIG_UID16=y
+# CONFIG_KERNEL_LZMA is not set
+
+# CONFIG_MK8 is not set
+# CONFIG_MPSC is not set
+CONFIG_GENERIC_CPU=y
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_VSMP is not set
+# CONFIG_X86_UV is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_X86_CPU_DEBUG is not set
+CONFIG_MTRR=y
+CONFIG_NUMA=y
+CONFIG_K8_NUMA=y
+CONFIG_X86_64_ACPI_NUMA=y
+# CONFIG_NUMA_EMU is not set
+CONFIG_NR_CPUS=256
+CONFIG_X86_POWERNOW_K8=m
+CONFIG_X86_P4_CLOCKMOD=m
+CONFIG_IA32_EMULATION=y
+# CONFIG_IA32_AOUT is not set
+# CONFIG_IOMMU_DEBUG is not set
+CONFIG_DEBUG_RODATA=y
+CONFIG_MICROCODE=m
+CONFIG_SWIOTLB=y
+CONFIG_CALGARY_IOMMU=y
+CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
+CONFIG_X86_PM_TIMER=y
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_DMAR=y
+CONFIG_DMAR_BROKEN_GFX_WA=y
+CONFIG_DMAR_FLOPPY_WA=y
+CONFIG_DMAR_DEFAULT_ON=y
+
+CONFIG_KEXEC_JUMP=y
+
+CONFIG_EFI=y
+CONFIG_EFI_VARS=y
+CONFIG_EFI_PCDP=y
+CONFIG_FB_EFI=y
+
+CONFIG_I2O=m
+CONFIG_I2O_BLOCK=m
+CONFIG_I2O_SCSI=m
+CONFIG_I2O_PROC=m
+CONFIG_I2O_CONFIG=y
+CONFIG_I2O_EXT_ADAPTEC=y
+CONFIG_I2O_EXT_ADAPTEC_DMA64=y
+CONFIG_I2O_CONFIG_OLD_IOCTL=y
+CONFIG_I2O_BUS=m
+
+CONFIG_SECCOMP=y
+
+CONFIG_CAPI_EICON=y
+
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_SCHED_SMT=y
+CONFIG_SUSPEND=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
+
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_DEBUG=y
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_ACPI_CPUFREQ=m
+CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+
+CONFIG_ACPI=y
+CONFIG_ACPI_AC=y
+# CONFIG_ACPI_ASUS is not set
+CONFIG_ACPI_PROCFS_POWER=y
+CONFIG_ACPI_SYSFS_POWER=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BLACKLIST_YEAR=0
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_CONTAINER=m
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_HOTPLUG_MEMORY=m
+CONFIG_ACPI_NUMA=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_PROCFS=y
+CONFIG_ACPI_SBS=m
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_THERMAL=y
+CONFIG_ACPI_TOSHIBA=m
+CONFIG_ACPI_POWER=y
+CONFIG_ACPI_VIDEO=m
+# Disable in F9.
+CONFIG_ACPI_PROC_EVENT=y
+CONFIG_ACPI_POWER_METER=m
+CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
+
+CONFIG_X86_PLATFORM_DEVICES=y
+CONFIG_ASUS_LAPTOP=m
+CONFIG_COMPAL_LAPTOP=m
+CONFIG_FUJITSU_LAPTOP=m
+# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
+CONFIG_MSI_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+CONFIG_SONYPI_COMPAT=y
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_DELL_LAPTOP=m
+CONFIG_ACPI_WMI=m
+CONFIG_ACER_WMI=m
+CONFIG_HP_WMI=m
+CONFIG_DELL_WMI=m
+
+CONFIG_THINKPAD_ACPI=m
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+CONFIG_THINKPAD_ACPI_VIDEO=y
+# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
+
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_COMPAQ=m
+# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set
+CONFIG_HOTPLUG_PCI_IBM=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+# SHPC has half-arsed PCI probing, which makes it load on too many systems
+CONFIG_HOTPLUG_PCI_SHPC=m
+
+CONFIG_HPET=y
+# CONFIG_HPET_MMAP is not set
+CONFIG_PM=y
+
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_IPW2200=m
+CONFIG_IPW2200_MONITOR=y
+CONFIG_IPW2200_RADIOTAP=y
+CONFIG_IPW2200_PROMISCUOUS=y
+CONFIG_IPW2200_QOS=y
+
+CONFIG_PNP=y
+CONFIG_PNPACPI=y
+
+CONFIG_BLK_DEV_AMD74XX=y
+CONFIG_CRYPTO_DEV_PADLOCK=m
+CONFIG_CRYPTO_DEV_PADLOCK_AES=m
+CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
+# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES_X86_64=m
+# CONFIG_CRYPTO_TWOFISH is not set
+CONFIG_CRYPTO_TWOFISH_X86_64=m
+# CONFIG_CRYPTO_SALSA20 is not set
+CONFIG_CRYPTO_SALSA20_X86_64=m
+CONFIG_CRYPTO_AES_NI_INTEL=m
+
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
+
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_NFORCE2_S4985=m
+CONFIG_I2C_PIIX4=m
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+
+CONFIG_DELL_RBU=m
+CONFIG_DCDBAS=m
+
+CONFIG_NVRAM=y
+
+CONFIG_EDAC=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_MM_EDAC=m
+CONFIG_EDAC_AMD76X=m
+CONFIG_EDAC_E7XXX=m
+CONFIG_EDAC_E752X=m
+CONFIG_EDAC_I5000=m
+CONFIG_EDAC_I5100=m
+CONFIG_EDAC_I5400=m
+CONFIG_EDAC_I82875P=m
+CONFIG_EDAC_I82860=m
+CONFIG_EDAC_I82975X=m
+CONFIG_EDAC_R82600=m
+CONFIG_EDAC_AMD8131=m
+CONFIG_EDAC_AMD8111=m
+CONFIG_EDAC_AMD64=m
+# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set
+CONFIG_EDAC_DECODE_MCE=m
+
+CONFIG_SCHED_MC=y
+
+CONFIG_TCG_INFINEON=m
+
+CONFIG_HW_RANDOM_INTEL=m
+CONFIG_HW_RANDOM_AMD=m
+CONFIG_HW_RANDOM_VIA=m
+
+# CONFIG_HW_RANDOM_GEODE is not set
+
+
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_NMI_TIMEOUT=5
+
+# CONFIG_PC8736x_GPIO is not set
+
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_CS5535 is not set
+
+CONFIG_CC_STACKPROTECTOR=y
+
+CONFIG_SGI_IOC4=m
+CONFIG_SGI_XP=m
+CONFIG_SGI_GRU=m
+# CONFIG_SGI_GRU_DEBUG is not set
+
+# CONFIG_SMSC37B787_WDT is not set
+CONFIG_W83697HF_WDT=m
+
+# CONFIG_VIDEO_CAFE_CCIC is not set
+
+CONFIG_MTD_ESB2ROM=m
+CONFIG_MTD_CK804XROM=m
+
+CONFIG_RELOCATABLE=y
+CONFIG_MACINTOSH_DRIVERS=y
+
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x1000000
+CONFIG_PROC_VMCORE=y
+CONFIG_CRASH=m
+
+CONFIG_DMIID=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_IBFT=m
+
+
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_GOV_LADDER is not set
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+CONFIG_KVM_AMD=m
+
+CONFIG_PARAVIRT_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_DEBUG is not set
+# PARAVIRT_SPINLOCKS has a 5% perf hit
+# CONFIG_PARAVIRT_SPINLOCKS is not set
+CONFIG_KVM_CLOCK=y
+CONFIG_KVM_GUEST=y
+
+CONFIG_XEN=y
+CONFIG_XEN_MAX_DOMAIN_MEMORY=32
+CONFIG_XEN_BALLOON=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_SAVE_RESTORE=y
+CONFIG_HVC_XEN=y
+CONFIG_XEN_FBDEV_FRONTEND=y
+CONFIG_XEN_KBDDEV_FRONTEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_XEN_NETDEV_FRONTEND=m
+CONFIG_XENFS=m
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_DEV_EVTCHN=m
+CONFIG_XEN_SYS_HYPERVISOR=y
+
+CONFIG_DMADEVICES=y
+CONFIG_INTEL_IOATDMA=m
+
+CONFIG_SENSORS_I5K_AMB=m
+
+# CONFIG_COMPAT_VDSO is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_CPA_DEBUG is not set
+
+CONFIG_HP_WATCHDOG=m
+
+CONFIG_FRAME_WARN=2048
+
+CONFIG_NODES_SHIFT=9
+CONFIG_X86_PAT=y
+# FIXME: These should be 32bit only
+# CONFIG_FB_N411 is not set
+CONFIG_STRICT_DEVMEM=y
+
+CONFIG_DIRECT_GBPAGES=y
+
+# CONFIG_MEMTEST is not set
+CONFIG_AMD_IOMMU=y
+CONFIG_AMD_IOMMU_STATS=y
+# CONFIG_MAXSMP is not set
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_SYSPROF_TRACER=y
+# CONFIG_X86_VERBOSE_BOOTUP is not set
+# CONFIG_MMIOTRACE_TEST is not set
+
+CONFIG_X86_MPPARSE=y
+
+CONFIG_BACKLIGHT_MBP_NVIDIA=m
+
+CONFIG_OPROFILE_IBS=y
+CONFIG_MICROCODE_INTEL=y
+CONFIG_MICROCODE_AMD=y
+
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW_64K=y
+
+# CONFIG_CMDLINE_BOOL is not set
+
+CONFIG_PANASONIC_LAPTOP=m
+
+CONFIG_XEN_DEBUG_FS=y
+CONFIG_X86_PTRACE_BTS=y
+
+CONFIG_I7300_IDLE=m
+CONFIG_INTR_REMAP=y
+
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+
+CONFIG_POWER_TRACER=y
+CONFIG_HW_BRANCH_TRACER=y
+
+CONFIG_X86_X2APIC=y
+CONFIG_SPARSE_IRQ=y
+
+CONFIG_RCU_FANOUT=64
+
+# CONFIG_IOMMU_STRESS is not set
+
+CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+
+# CONFIG_X86_MCE_INJECT is not set
+
+CONFIG_SFI=y
+CONFIG_INPUT_WINBOND_CIR=m
+CONFIG_I2C_SCMI=m
+CONFIG_SBC_FITPC2_WATCHDOG=m
+CONFIG_EDAC_I3200=m
+CONFIG_TOPSTAR_LAPTOP=m
+# CONFIG_INTEL_TXT is not set
+CONFIG_GPIO_LANGWELL=y
+
+CONFIG_FUNCTION_GRAPH_TRACER=y
+
+CONFIG_ACERHDF=m
diff --git a/freed-ora/current/F-12/crypto-add-async-hash-testing.patch b/freed-ora/current/F-12/crypto-add-async-hash-testing.patch
new file mode 100644
index 000000000..8df0ad44f
--- /dev/null
+++ b/freed-ora/current/F-12/crypto-add-async-hash-testing.patch
@@ -0,0 +1,111 @@
+From e45009229be6a7fae49bdfa3459905668c0b0fb1 Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Wed, 19 May 2010 14:12:03 +1000
+Subject: crypto: testmgr - Add testing for async hashing and update/final
+
+Extend testmgr such that it tests async hash algorithms,
+and that for both sync and async hashes it tests both
+->digest() and ->update()/->final() sequences.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ crypto/testmgr.c | 66 +++++++++++++++++++++++++++++++++++++++--------------
+ 1 files changed, 48 insertions(+), 18 deletions(-)
+
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index c494d76..5c8aaa0 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
+ free_page((unsigned long)buf[i]);
+ }
+
++static int do_one_async_hash_op(struct ahash_request *req,
++ struct tcrypt_result *tr,
++ int ret)
++{
++ if (ret == -EINPROGRESS || ret == -EBUSY) {
++ ret = wait_for_completion_interruptible(&tr->completion);
++ if (!ret)
++ ret = tr->err;
++ INIT_COMPLETION(tr->completion);
++ }
++ return ret;
++}
++
+ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+- unsigned int tcount)
++ unsigned int tcount, bool use_digest)
+ {
+ const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
+ unsigned int i, j, k, temp;
+@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
+ }
+
+ ahash_request_set_crypt(req, sg, result, template[i].psize);
+- ret = crypto_ahash_digest(req);
+- switch (ret) {
+- case 0:
+- break;
+- case -EINPROGRESS:
+- case -EBUSY:
+- ret = wait_for_completion_interruptible(
+- &tresult.completion);
+- if (!ret && !(ret = tresult.err)) {
+- INIT_COMPLETION(tresult.completion);
+- break;
++ if (use_digest) {
++ ret = do_one_async_hash_op(req, &tresult,
++ crypto_ahash_digest(req));
++ if (ret) {
++ pr_err("alg: hash: digest failed on test %d "
++ "for %s: ret=%d\n", j, algo, -ret);
++ goto out;
++ }
++ } else {
++ ret = do_one_async_hash_op(req, &tresult,
++ crypto_ahash_init(req));
++ if (ret) {
++ pr_err("alt: hash: init failed on test %d "
++ "for %s: ret=%d\n", j, algo, -ret);
++ goto out;
++ }
++ ret = do_one_async_hash_op(req, &tresult,
++ crypto_ahash_update(req));
++ if (ret) {
++ pr_err("alt: hash: update failed on test %d "
++ "for %s: ret=%d\n", j, algo, -ret);
++ goto out;
++ }
++ ret = do_one_async_hash_op(req, &tresult,
++ crypto_ahash_final(req));
++ if (ret) {
++ pr_err("alt: hash: final failed on test %d "
++ "for %s: ret=%d\n", j, algo, -ret);
++ goto out;
+ }
+- /* fall through */
+- default:
+- printk(KERN_ERR "alg: hash: digest failed on test %d "
+- "for %s: ret=%d\n", j, algo, -ret);
+- goto out;
+ }
+
+ if (memcmp(result, template[i].digest,
+@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
+ return PTR_ERR(tfm);
+ }
+
+- err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
++ err = test_hash(tfm, desc->suite.hash.vecs,
++ desc->suite.hash.count, true);
++ if (!err)
++ err = test_hash(tfm, desc->suite.hash.vecs,
++ desc->suite.hash.count, false);
+
+ crypto_free_ahash(tfm);
+ return err;
+--
+1.7.0.1
+
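For context on the patch above: with an async hash, any of the digest, init, update, or final calls may return -EINPROGRESS or -EBUSY, and the caller has to park on a completion until the driver finishes, which is exactly what the new do_one_async_hash_op() helper wraps. What follows is a minimal caller-side sketch of the two sequences the test now covers, one-shot ->digest() versus ->init()/->update()/->final(), assuming the 2.6.32-era ahash API; the demo_* names are invented and this code is not part of the patch.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

struct demo_wait {
	struct completion done;
	int err;
};

static void demo_hash_done(struct crypto_async_request *areq, int err)
{
	struct demo_wait *w = areq->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged request, a second callback follows */
	w->err = err;
	complete(&w->done);
}

static int demo_wait_op(struct demo_wait *w, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&w->done);
		ret = w->err;
		INIT_COMPLETION(w->done); /* 2.6.32-era; later kernels use reinit_completion() */
	}
	return ret;
}

/* 'out' must hold crypto_ahash_digestsize() bytes (20 for sha1). */
static int demo_sha1(const u8 *data, unsigned int len, u8 *out, bool one_shot)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct demo_wait w;
	int ret;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&w.done);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_hash_done, &w);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	if (one_shot) {
		/* Single pass, the only path the self-tests exercised before. */
		ret = demo_wait_op(&w, crypto_ahash_digest(req));
	} else {
		/* Split sequence, the path the patch adds coverage for. */
		ret = demo_wait_op(&w, crypto_ahash_init(req));
		if (!ret)
			ret = demo_wait_op(&w, crypto_ahash_update(req));
		if (!ret)
			ret = demo_wait_op(&w, crypto_ahash_final(req));
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}

Both paths must produce the same digest; until this patch only the one-shot path was exercised by testmgr, so driver bugs in the partial update/final handling could go unnoticed.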
diff --git a/freed-ora/current/F-12/crypto-testmgr-add-null-test-for-aesni.patch b/freed-ora/current/F-12/crypto-testmgr-add-null-test-for-aesni.patch
new file mode 100644
index 000000000..b38a6f914
--- /dev/null
+++ b/freed-ora/current/F-12/crypto-testmgr-add-null-test-for-aesni.patch
@@ -0,0 +1,138 @@
+From: Youquan, Song <youquan.song@intel.com>
+Date: Wed, 23 Dec 2009 11:45:20 +0000 (+0800)
+Subject: crypto: testmgr - Fix complain about lack test for internal used algorithm
+X-Git-Tag: v2.6.34-rc1~286^2~28
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=863b557a88f8c033f7419fabafef4712a5055f85
+
+crypto: testmgr - Fix complain about lack test for internal used algorithm
+
+When the aesni-intel and ghash_clmulni-intel drivers are loaded, the kernel
+ complains that there is no test for some internally used algorithms.
+The messages look like the following:
+
+alg: No test for __aes-aesni (__driver-aes-aesni)
+alg: No test for __ecb-aes-aesni (__driver-ecb-aes-aesni)
+alg: No test for __cbc-aes-aesni (__driver-cbc-aes-aesni)
+alg: No test for __ecb-aes-aesni (cryptd(__driver-ecb-aes-aesni)
+alg: No test for __ghash (__ghash-pclmulqdqni)
+alg: No test for __ghash (cryptd(__ghash-pclmulqdqni))
+
+This patch adds NULL test entries for these algorithms and drivers.
+
+Signed-off-by: Youquan, Song <youquan.song@intel.com>
+Signed-off-by: Ying, Huang <ying.huang@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 7620bfc..c494d76 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -1477,9 +1477,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
+ return err;
+ }
+
++static int alg_test_null(const struct alg_test_desc *desc,
++ const char *driver, u32 type, u32 mask)
++{
++ return 0;
++}
++
+ /* Please keep this list sorted by algorithm name. */
+ static const struct alg_test_desc alg_test_descs[] = {
+ {
++ .alg = "__driver-cbc-aes-aesni",
++ .test = alg_test_null,
++ .suite = {
++ .cipher = {
++ .enc = {
++ .vecs = NULL,
++ .count = 0
++ },
++ .dec = {
++ .vecs = NULL,
++ .count = 0
++ }
++ }
++ }
++ }, {
++ .alg = "__driver-ecb-aes-aesni",
++ .test = alg_test_null,
++ .suite = {
++ .cipher = {
++ .enc = {
++ .vecs = NULL,
++ .count = 0
++ },
++ .dec = {
++ .vecs = NULL,
++ .count = 0
++ }
++ }
++ }
++ }, {
++ .alg = "__ghash-pclmulqdqni",
++ .test = alg_test_null,
++ .suite = {
++ .hash = {
++ .vecs = NULL,
++ .count = 0
++ }
++ }
++ }, {
+ .alg = "ansi_cprng",
+ .test = alg_test_cprng,
+ .fips_allowed = 1,
+@@ -1623,6 +1668,30 @@ static const struct alg_test_desc alg_test_descs[] = {
+ }
+ }
+ }, {
++ .alg = "cryptd(__driver-ecb-aes-aesni)",
++ .test = alg_test_null,
++ .suite = {
++ .cipher = {
++ .enc = {
++ .vecs = NULL,
++ .count = 0
++ },
++ .dec = {
++ .vecs = NULL,
++ .count = 0
++ }
++ }
++ }
++ }, {
++ .alg = "cryptd(__ghash-pclmulqdqni)",
++ .test = alg_test_null,
++ .suite = {
++ .hash = {
++ .vecs = NULL,
++ .count = 0
++ }
++ }
++ }, {
+ .alg = "ctr(aes)",
+ .test = alg_test_skcipher,
+ .fips_allowed = 1,
+@@ -1669,6 +1738,21 @@ static const struct alg_test_desc alg_test_descs[] = {
+ }
+ }
+ }, {
++ .alg = "ecb(__aes-aesni)",
++ .test = alg_test_null,
++ .suite = {
++ .cipher = {
++ .enc = {
++ .vecs = NULL,
++ .count = 0
++ },
++ .dec = {
++ .vecs = NULL,
++ .count = 0
++ }
++ }
++ }
++ }, {
+ .alg = "ecb(aes)",
+ .test = alg_test_skcipher,
+ .fips_allowed = 1,
diff --git a/freed-ora/current/F-12/crystalhd-2.6.34-staging.patch b/freed-ora/current/F-12/crystalhd-2.6.34-staging.patch
new file mode 100644
index 000000000..671bd3911
--- /dev/null
+++ b/freed-ora/current/F-12/crystalhd-2.6.34-staging.patch
@@ -0,0 +1,8287 @@
+Broadcom Crystal HD video decoder driver from upstream staging/linux-next.
+
+Signed-off-by: Jarod Wilson <jarod@redhat.com>
+
+---
+ drivers/staging/Kconfig | 2 +
+ drivers/staging/Makefile | 1 +
+ drivers/staging/crystalhd/Kconfig | 6 +
+ drivers/staging/crystalhd/Makefile | 6 +
+ drivers/staging/crystalhd/TODO | 16 +
+ drivers/staging/crystalhd/bc_dts_defs.h | 498 ++++++
+ drivers/staging/crystalhd/bc_dts_glob_lnx.h | 299 ++++
+ drivers/staging/crystalhd/bc_dts_types.h | 121 ++
+ drivers/staging/crystalhd/bcm_70012_regs.h | 757 +++++++++
+ drivers/staging/crystalhd/crystalhd_cmds.c | 1058 ++++++++++++
+ drivers/staging/crystalhd/crystalhd_cmds.h | 88 +
+ drivers/staging/crystalhd/crystalhd_fw_if.h | 369 ++++
+ drivers/staging/crystalhd/crystalhd_hw.c | 2395 +++++++++++++++++++++++++++
+ drivers/staging/crystalhd/crystalhd_hw.h | 398 +++++
+ drivers/staging/crystalhd/crystalhd_lnx.c | 780 +++++++++
+ drivers/staging/crystalhd/crystalhd_lnx.h | 95 ++
+ drivers/staging/crystalhd/crystalhd_misc.c | 1029 ++++++++++++
+ drivers/staging/crystalhd/crystalhd_misc.h | 229 +++
+ 18 files changed, 8147 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
+index 94eb863..61ec152 100644
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -145,5 +145,7 @@ source "drivers/staging/netwave/Kconfig"
+
+ source "drivers/staging/iio/Kconfig"
+
++source "drivers/staging/crystalhd/Kconfig"
++
+ endif # !STAGING_EXCLUDE_BUILD
+ endif # STAGING
+diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
+index b5e67b8..dc40493 100644
+--- a/drivers/staging/Makefile
++++ b/drivers/staging/Makefile
+@@ -53,3 +53,4 @@ obj-$(CONFIG_WAVELAN) += wavelan/
+ obj-$(CONFIG_RAR_REGISTER) += rar/
+ obj-$(CONFIG_DX_SEP) += sep/
+ obj-$(CONFIG_IIO) += iio/
++obj-$(CONFIG_CRYSTALHD) += crystalhd/
+diff --git a/drivers/staging/crystalhd/Kconfig b/drivers/staging/crystalhd/Kconfig
+new file mode 100644
+index 0000000..56b414b
+--- /dev/null
++++ b/drivers/staging/crystalhd/Kconfig
+@@ -0,0 +1,6 @@
++config CRYSTALHD
++ tristate "Broadcom Crystal HD video decoder support"
++ depends on PCI
++ default n
++ help
++ Support for the Broadcom Crystal HD video decoder chipset
+diff --git a/drivers/staging/crystalhd/Makefile b/drivers/staging/crystalhd/Makefile
+new file mode 100644
+index 0000000..e2af0ce
+--- /dev/null
++++ b/drivers/staging/crystalhd/Makefile
+@@ -0,0 +1,6 @@
++obj-$(CONFIG_CRYSTALHD) += crystalhd.o
++
++crystalhd-objs := crystalhd_cmds.o \
++ crystalhd_hw.o \
++ crystalhd_lnx.o \
++ crystalhd_misc.o
+diff --git a/drivers/staging/crystalhd/TODO b/drivers/staging/crystalhd/TODO
+new file mode 100644
+index 0000000..69be5d0
+--- /dev/null
++++ b/drivers/staging/crystalhd/TODO
+@@ -0,0 +1,16 @@
++- Testing
++- Cleanup return codes
++- Cleanup typedefs
++- Cleanup all WIN* references
++- Allocate an Accelerator device class specific Major number,
++ since we don't have any other open sourced accelerators, it is the only
++ one in that category for now.
++ A somewhat similar device is the DXR2/3
++
++Please send patches to:
++Greg Kroah-Hartman <greg@kroah.com>
++Naren Sankar <nsankar@broadcom.com>
++Jarod Wilson <jarod@wilsonet.com>
++Scott Davilla <davilla@4pi.com>
++Manu Abraham <abraham.manu@gmail.com>
++
+diff --git a/drivers/staging/crystalhd/bc_dts_defs.h b/drivers/staging/crystalhd/bc_dts_defs.h
+new file mode 100644
+index 0000000..c34cc07
+--- /dev/null
++++ b/drivers/staging/crystalhd/bc_dts_defs.h
+@@ -0,0 +1,498 @@
++/********************************************************************
++ * Copyright(c) 2006-2009 Broadcom Corporation.
++ *
++ * Name: bc_dts_defs.h
++ *
++ * Description: Common definitions for all components. Only types
++ * is allowed to be included from this file.
++ *
++ * AU
++ *
++ * HISTORY:
++ *
++ ********************************************************************
++ * This header is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as published
++ * by the Free Software Foundation, either version 2.1 of the License.
++ *
++ * This header is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU Lesser General Public License for more details.
++ * You should have received a copy of the GNU Lesser General Public License
++ * along with this header. If not, see <http://www.gnu.org/licenses/>.
++ *******************************************************************/
++
++#ifndef _BC_DTS_DEFS_H_
++#define _BC_DTS_DEFS_H_
++
++#include "bc_dts_types.h"
++
++/* BIT Mask */
++#define BC_BIT(_x) (1 << (_x))
++
++typedef enum _BC_STATUS {
++ BC_STS_SUCCESS = 0,
++ BC_STS_INV_ARG = 1,
++ BC_STS_BUSY = 2,
++ BC_STS_NOT_IMPL = 3,
++ BC_STS_PGM_QUIT = 4,
++ BC_STS_NO_ACCESS = 5,
++ BC_STS_INSUFF_RES = 6,
++ BC_STS_IO_ERROR = 7,
++ BC_STS_NO_DATA = 8,
++ BC_STS_VER_MISMATCH = 9,
++ BC_STS_TIMEOUT = 10,
++ BC_STS_FW_CMD_ERR = 11,
++ BC_STS_DEC_NOT_OPEN = 12,
++ BC_STS_ERR_USAGE = 13,
++ BC_STS_IO_USER_ABORT = 14,
++ BC_STS_IO_XFR_ERROR = 15,
++ BC_STS_DEC_NOT_STARTED = 16,
++ BC_STS_FWHEX_NOT_FOUND = 17,
++ BC_STS_FMT_CHANGE = 18,
++ BC_STS_HIF_ACCESS = 19,
++ BC_STS_CMD_CANCELLED = 20,
++ BC_STS_FW_AUTH_FAILED = 21,
++ BC_STS_BOOTLOADER_FAILED = 22,
++ BC_STS_CERT_VERIFY_ERROR = 23,
++ BC_STS_DEC_EXIST_OPEN = 24,
++ BC_STS_PENDING = 25,
++ BC_STS_CLK_NOCHG = 26,
++
++ /* Must be the last one.*/
++ BC_STS_ERROR = -1
++} BC_STATUS;
++
++/*------------------------------------------------------*
++ * Registry Key Definitions *
++ *------------------------------------------------------*/
++#define BC_REG_KEY_MAIN_PATH "Software\\Broadcom\\MediaPC\\70010"
++#define BC_REG_KEY_FWPATH "FirmwareFilePath"
++#define BC_REG_KEY_SEC_OPT "DbgOptions"
++
++/*
++ * Options:
++ *
++ * b[5] = Enable RSA KEY in EEPROM Support
++ * b[6] = Enable Old PIB scheme. (0 = Use PIB with video scheme)
++ *
++ * b[12] = Enable send message to NotifyIcon
++ *
++ */
++
++typedef enum _BC_SW_OPTIONS {
++ BC_OPT_DOSER_OUT_ENCRYPT = BC_BIT(3),
++ BC_OPT_LINK_OUT_ENCRYPT = BC_BIT(29),
++} BC_SW_OPTIONS;
++
++typedef struct _BC_REG_CONFIG{
++ uint32_t DbgOptions;
++} BC_REG_CONFIG;
++
++#if defined(__KERNEL__) || defined(__LINUX_USER__)
++#else
++/* Align data structures */
++#define ALIGN(x) __declspec(align(x))
++#endif
++
++/* mode
++ * b[0]..b[7] = _DtsDeviceOpenMode
++ * b[8] = Load new FW
++ * b[9] = Load file play back FW
++ * b[10] = Disk format (0 for HD DVD and 1 for BLU ray)
++ * b[11]-b[15] = default output resolution
++ * b[16] = Skip TX CPB Buffer Check
++ * b[17] = Adaptive Output Encrypt/Scramble Scheme
++ * b[18]-b[31] = reserved for future use
++ */
++
++/* To allow multiple apps to open the device. */
++enum _DtsDeviceOpenMode {
++ DTS_PLAYBACK_MODE = 0,
++ DTS_DIAG_MODE,
++ DTS_MONITOR_MODE,
++ DTS_HWINIT_MODE
++};
++
++/* To enable the filter to selectively enable/disable fixes or erratas */
++enum _DtsDeviceFixMode {
++ DTS_LOAD_NEW_FW = BC_BIT(8),
++ DTS_LOAD_FILE_PLAY_FW = BC_BIT(9),
++ DTS_DISK_FMT_BD = BC_BIT(10),
++ /* b[11]-b[15] : Default output resolution */
++ DTS_SKIP_TX_CHK_CPB = BC_BIT(16),
++ DTS_ADAPTIVE_OUTPUT_PER = BC_BIT(17),
++ DTS_INTELLIMAP = BC_BIT(18),
++ /* b[19]-b[21] : select clock frequency */
++ DTS_PLAYBACK_DROP_RPT_MODE = BC_BIT(22)
++};
++
++#define DTS_DFLT_RESOLUTION(x) (x<<11)
++
++#define DTS_DFLT_CLOCK(x) (x<<19)
++
++/* F/W File Version corresponding to S/W Releases */
++enum _FW_FILE_VER {
++ /* S/W release: 02.04.02 F/W release 2.12.2.0 */
++ BC_FW_VER_020402 = ((12<<16) | (2<<8) | (0))
++};
++
++/*------------------------------------------------------*
++ * Stream Types for DtsOpenDecoder() *
++ *------------------------------------------------------*/
++enum _DtsOpenDecStreamTypes {
++ BC_STREAM_TYPE_ES = 0,
++ BC_STREAM_TYPE_PES = 1,
++ BC_STREAM_TYPE_TS = 2,
++ BC_STREAM_TYPE_ES_TSTAMP = 6,
++};
++
++/*------------------------------------------------------*
++ * Video Algorithms for DtsSetVideoParams() *
++ *------------------------------------------------------*/
++enum _DtsSetVideoParamsAlgo {
++ BC_VID_ALGO_H264 = 0,
++ BC_VID_ALGO_MPEG2 = 1,
++ BC_VID_ALGO_VC1 = 4,
++ BC_VID_ALGO_VC1MP = 7,
++};
++
++/*------------------------------------------------------*
++ * MPEG Extension to the PPB *
++ *------------------------------------------------------*/
++#define BC_MPEG_VALID_PANSCAN (1)
++
++typedef struct _BC_PIB_EXT_MPEG {
++ uint32_t valid;
++ /* Always valid, defaults to picture size if no
++ * sequence display extension in the stream. */
++ uint32_t display_horizontal_size;
++ uint32_t display_vertical_size;
++
++ /* MPEG_VALID_PANSCAN
++ * Offsets are a copy values from the MPEG stream. */
++ uint32_t offset_count;
++ int32_t horizontal_offset[3];
++ int32_t vertical_offset[3];
++
++} BC_PIB_EXT_MPEG;
++
++/*------------------------------------------------------*
++ * H.264 Extension to the PPB *
++ *------------------------------------------------------*/
++/* Bit definitions for 'other.h264.valid' field */
++#define H264_VALID_PANSCAN (1)
++#define H264_VALID_SPS_CROP (2)
++#define H264_VALID_VUI (4)
++
++typedef struct _BC_PIB_EXT_H264 {
++ /* 'valid' specifies which fields (or sets of
++ * fields) below are valid. If the corresponding
++ * bit in 'valid' is NOT set then that field(s)
++ * is (are) not initialized. */
++ uint32_t valid;
++
++ /* H264_VALID_PANSCAN */
++ uint32_t pan_scan_count;
++ int32_t pan_scan_left[3];
++ int32_t pan_scan_right[3];
++ int32_t pan_scan_top[3];
++ int32_t pan_scan_bottom[3];
++
++ /* H264_VALID_SPS_CROP */
++ int32_t sps_crop_left;
++ int32_t sps_crop_right;
++ int32_t sps_crop_top;
++ int32_t sps_crop_bottom;
++
++ /* H264_VALID_VUI */
++ uint32_t chroma_top;
++ uint32_t chroma_bottom;
++
++} BC_PIB_EXT_H264;
++
++/*------------------------------------------------------*
++ * VC1 Extension to the PPB *
++ *------------------------------------------------------*/
++#define VC1_VALID_PANSCAN (1)
++
++typedef struct _BC_PIB_EXT_VC1 {
++ uint32_t valid;
++
++ /* Always valid, defaults to picture size if no
++ * sequence display extension in the stream. */
++ uint32_t display_horizontal_size;
++ uint32_t display_vertical_size;
++
++ /* VC1 pan scan windows */
++ uint32_t num_panscan_windows;
++ int32_t ps_horiz_offset[4];
++ int32_t ps_vert_offset[4];
++ int32_t ps_width[4];
++ int32_t ps_height[4];
++
++} BC_PIB_EXT_VC1;
++
++
++/*------------------------------------------------------*
++ * Picture Information Block *
++ *------------------------------------------------------*/
++#if defined(_WIN32) || defined(_WIN64) || defined(__LINUX_USER__)
++/* Values for 'pulldown' field. '0' means no pulldown information
++ * was present for this picture. */
++enum {
++ vdecNoPulldownInfo = 0,
++ vdecTop = 1,
++ vdecBottom = 2,
++ vdecTopBottom = 3,
++ vdecBottomTop = 4,
++ vdecTopBottomTop = 5,
++ vdecBottomTopBottom = 6,
++ vdecFrame_X2 = 7,
++ vdecFrame_X3 = 8,
++ vdecFrame_X1 = 9,
++ vdecFrame_X4 = 10,
++};
++
++/* Values for the 'frame_rate' field. */
++enum {
++ vdecFrameRateUnknown = 0,
++ vdecFrameRate23_97,
++ vdecFrameRate24,
++ vdecFrameRate25,
++ vdecFrameRate29_97,
++ vdecFrameRate30,
++ vdecFrameRate50,
++ vdecFrameRate59_94,
++ vdecFrameRate60,
++};
++
++/* Values for the 'aspect_ratio' field. */
++enum {
++ vdecAspectRatioUnknown = 0,
++ vdecAspectRatioSquare,
++ vdecAspectRatio12_11,
++ vdecAspectRatio10_11,
++ vdecAspectRatio16_11,
++ vdecAspectRatio40_33,
++ vdecAspectRatio24_11,
++ vdecAspectRatio20_11,
++ vdecAspectRatio32_11,
++ vdecAspectRatio80_33,
++ vdecAspectRatio18_11,
++ vdecAspectRatio15_11,
++ vdecAspectRatio64_33,
++ vdecAspectRatio160_99,
++ vdecAspectRatio4_3,
++ vdecAspectRatio16_9,
++ vdecAspectRatio221_1,
++ vdecAspectRatioOther = 255,
++};
++
++/* Values for the 'colour_primaries' field. */
++enum {
++ vdecColourPrimariesUnknown = 0,
++ vdecColourPrimariesBT709,
++ vdecColourPrimariesUnspecified,
++ vdecColourPrimariesReserved,
++ vdecColourPrimariesBT470_2M = 4,
++ vdecColourPrimariesBT470_2BG,
++ vdecColourPrimariesSMPTE170M,
++ vdecColourPrimariesSMPTE240M,
++ vdecColourPrimariesGenericFilm,
++};
++
++enum {
++ vdecRESOLUTION_CUSTOM = 0x00000000, /* custom */
++ vdecRESOLUTION_480i = 0x00000001, /* 480i */
++ vdecRESOLUTION_1080i = 0x00000002, /* 1080i (1920x1080, 60i) */
++ vdecRESOLUTION_NTSC = 0x00000003, /* NTSC (720x483, 60i) */
++ vdecRESOLUTION_480p = 0x00000004, /* 480p (720x480, 60p) */
++ vdecRESOLUTION_720p = 0x00000005, /* 720p (1280x720, 60p) */
++ vdecRESOLUTION_PAL1 = 0x00000006, /* PAL_1 (720x576, 50i) */
++ vdecRESOLUTION_1080i25 = 0x00000007, /* 1080i25 (1920x1080, 50i) */
++ vdecRESOLUTION_720p50 = 0x00000008, /* 720p50 (1280x720, 50p) */
++ vdecRESOLUTION_576p = 0x00000009, /* 576p (720x576, 50p) */
++ vdecRESOLUTION_1080i29_97 = 0x0000000A, /* 1080i (1920x1080, 59.94i) */
++ vdecRESOLUTION_720p59_94 = 0x0000000B, /* 720p (1280x720, 59.94p) */
++ vdecRESOLUTION_SD_DVD = 0x0000000C, /* SD DVD (720x483, 60i) */
++ vdecRESOLUTION_480p656 = 0x0000000D, /* 480p (720x480, 60p), output bus width 8 bit, clock 74.25MHz */
++ vdecRESOLUTION_1080p23_976 = 0x0000000E, /* 1080p23_976 (1920x1080, 23.976p) */
++ vdecRESOLUTION_720p23_976 = 0x0000000F, /* 720p23_976 (1280x720p, 23.976p) */
++ vdecRESOLUTION_240p29_97 = 0x00000010, /* 240p (1440x240, 29.97p ) */
++ vdecRESOLUTION_240p30 = 0x00000011, /* 240p (1440x240, 30p) */
++ vdecRESOLUTION_288p25 = 0x00000012, /* 288p (1440x288p, 25p) */
++ vdecRESOLUTION_1080p29_97 = 0x00000013, /* 1080p29_97 (1920x1080, 29.97p) */
++ vdecRESOLUTION_1080p30 = 0x00000014, /* 1080p30 (1920x1080, 30p) */
++ vdecRESOLUTION_1080p24 = 0x00000015, /* 1080p24 (1920x1080, 24p) */
++ vdecRESOLUTION_1080p25 = 0x00000016, /* 1080p25 (1920x1080, 25p) */
++ vdecRESOLUTION_720p24 = 0x00000017, /* 720p24 (1280x720, 25p) */
++ vdecRESOLUTION_720p29_97 = 0x00000018, /* 720p29.97 (1280x720, 29.97p) */
++ vdecRESOLUTION_480p23_976 = 0x00000019, /* 480p23.976 (720*480, 23.976) */
++ vdecRESOLUTION_480p29_97 = 0x0000001A, /* 480p29.976 (720*480, 29.97p) */
++ vdecRESOLUTION_576p25 = 0x0000001B, /* 576p25 (720*576, 25p) */
++ /* For Zero Frame Rate */
++ vdecRESOLUTION_480p0 = 0x0000001C, /* 480p (720x480, 0p) */
++ vdecRESOLUTION_480i0 = 0x0000001D, /* 480i (720x480, 0i) */
++ vdecRESOLUTION_576p0 = 0x0000001E, /* 576p (720x576, 0p) */
++ vdecRESOLUTION_720p0 = 0x0000001F, /* 720p (1280x720, 0p) */
++ vdecRESOLUTION_1080p0 = 0x00000020, /* 1080p (1920x1080, 0p) */
++ vdecRESOLUTION_1080i0 = 0x00000021, /* 1080i (1920x1080, 0i) */
++};
++
++/* Bit definitions for 'flags' field */
++#define VDEC_FLAG_EOS (0x0004)
++
++#define VDEC_FLAG_FRAME (0x0000)
++#define VDEC_FLAG_FIELDPAIR (0x0008)
++#define VDEC_FLAG_TOPFIELD (0x0010)
++#define VDEC_FLAG_BOTTOMFIELD (0x0018)
++
++#define VDEC_FLAG_PROGRESSIVE_SRC (0x0000)
++#define VDEC_FLAG_INTERLACED_SRC (0x0020)
++#define VDEC_FLAG_UNKNOWN_SRC (0x0040)
++
++#define VDEC_FLAG_BOTTOM_FIRST (0x0080)
++#define VDEC_FLAG_LAST_PICTURE (0x0100)
++
++#define VDEC_FLAG_PICTURE_META_DATA_PRESENT (0x40000)
++
++#endif /* _WIN32 || _WIN64 */
++
++enum _BC_OUTPUT_FORMAT {
++ MODE420 = 0x0,
++ MODE422_YUY2 = 0x1,
++ MODE422_UYVY = 0x2,
++};
++
++typedef struct _BC_PIC_INFO_BLOCK {
++ /* Common fields. */
++ uint64_t timeStamp; /* Timestamp */
++ uint32_t picture_number; /* Ordinal display number */
++ uint32_t width; /* pixels */
++ uint32_t height; /* pixels */
++ uint32_t chroma_format; /* 0x420, 0x422 or 0x444 */
++ uint32_t pulldown;
++ uint32_t flags;
++ uint32_t frame_rate;
++ uint32_t aspect_ratio;
++ uint32_t colour_primaries;
++ uint32_t picture_meta_payload;
++ uint32_t sess_num;
++ uint32_t ycom;
++ uint32_t custom_aspect_ratio_width_height;
++ uint32_t n_drop; /* number of non-reference frames remaining to be dropped */
++
++ /* Protocol-specific extensions. */
++ union {
++ BC_PIB_EXT_H264 h264;
++ BC_PIB_EXT_MPEG mpeg;
++ BC_PIB_EXT_VC1 vc1;
++ } other;
++
++} BC_PIC_INFO_BLOCK, *PBC_PIC_INFO_BLOCK;
++
++/*------------------------------------------------------*
++ * ProcOut Info *
++ *------------------------------------------------------*/
++/* Optional flags for ProcOut Interface.*/
++enum _POUT_OPTIONAL_IN_FLAGS_{
++ /* Flags from App to Device */
++ BC_POUT_FLAGS_YV12 = 0x01, /* Copy Data in YV12 format */
++ BC_POUT_FLAGS_STRIDE = 0x02, /* Stride size is valid. */
++ BC_POUT_FLAGS_SIZE = 0x04, /* Take size information from Application */
++ BC_POUT_FLAGS_INTERLACED = 0x08, /* copy only half the bytes */
++ BC_POUT_FLAGS_INTERLEAVED = 0x10, /* interleaved frame */
++
++ /* Flags from Device to APP */
++ BC_POUT_FLAGS_FMT_CHANGE = 0x10000, /* Data is not VALID when this flag is set */
++ BC_POUT_FLAGS_PIB_VALID = 0x20000, /* PIB Information valid */
++ BC_POUT_FLAGS_ENCRYPTED = 0x40000, /* Data is encrypted. */
++ BC_POUT_FLAGS_FLD_BOT = 0x80000, /* Bottom Field data */
++};
++
++#if defined(__KERNEL__) || defined(__LINUX_USER__)
++typedef BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut);
++#else
++typedef BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, struct _BC_DTS_PROC_OUT *pOut);
++#endif
++
++/* Line 21 Closed Caption */
++/* User Data */
++#define MAX_UD_SIZE 1792 /* 1920 - 128 */
++
++typedef struct _BC_DTS_PROC_OUT {
++ uint8_t *Ybuff; /* Caller Supplied buffer for Y data */
++ uint32_t YbuffSz; /* Caller Supplied Y buffer size */
++ uint32_t YBuffDoneSz; /* Transferred Y datasize */
++
++ uint8_t *UVbuff; /* Caller Supplied buffer for UV data */
++ uint32_t UVbuffSz; /* Caller Supplied UV buffer size */
++ uint32_t UVBuffDoneSz; /* Transferred UV data size */
++
++ uint32_t StrideSz; /* Caller supplied Stride Size */
++ uint32_t PoutFlags; /* Call IN Flags */
++
++ uint32_t discCnt; /* Picture discontinuity count */
++
++ BC_PIC_INFO_BLOCK PicInfo; /* Picture Information Block Data */
++
++ /* Line 21 Closed Caption */
++ /* User Data */
++ uint32_t UserDataSz;
++ uint8_t UserData[MAX_UD_SIZE];
++
++ void *hnd;
++ dts_pout_callback AppCallBack;
++ uint8_t DropFrames;
++ uint8_t b422Mode; /* Picture output Mode */
++ uint8_t bPibEnc; /* PIB encrypted */
++ uint8_t bRevertScramble;
++
++} BC_DTS_PROC_OUT;
++
++typedef struct _BC_DTS_STATUS {
++ uint8_t ReadyListCount; /* Number of frames in ready list (reported by driver) */
++ uint8_t FreeListCount; /* Number of frame buffers free. (reported by driver) */
++ uint8_t PowerStateChange; /* Number of active state power transitions (reported by driver) */
++ uint8_t reserved_[1];
++
++ uint32_t FramesDropped; /* Number of frames dropped. (reported by DIL) */
++ uint32_t FramesCaptured; /* Number of frames captured. (reported by DIL) */
++ uint32_t FramesRepeated; /* Number of frames repeated. (reported by DIL) */
++
++ uint32_t InputCount; /* Times compressed video has been sent to the HW.
++ * i.e. Successful DtsProcInput() calls (reported by DIL) */
++ uint64_t InputTotalSize; /* Amount of compressed video that has been sent to the HW.
++ * (reported by DIL) */
++ uint32_t InputBusyCount; /* Times compressed video has attempted to be sent to the HW
++ * but the input FIFO was full. (reported by DIL) */
++
++ uint32_t PIBMissCount; /* Amount of times a PIB is invalid. (reported by DIL) */
++
++ uint32_t cpbEmptySize; /* supported only for H.264, specifically changed for
++ * Adobe. Report size of CPB buffer available.
++ * Reported by DIL */
++ uint64_t NextTimeStamp; /* TimeStamp of the next picture that will be returned
++ * by a call to ProcOutput. Added for Adobe. Reported
++ * back from the driver */
++ uint8_t reserved__[16];
++
++} BC_DTS_STATUS;
++
++#define BC_SWAP32(_v) \
++ ((((_v) & 0xFF000000)>>24)| \
++ (((_v) & 0x00FF0000)>>8)| \
++ (((_v) & 0x0000FF00)<<8)| \
++ (((_v) & 0x000000FF)<<24))
++
++#define WM_AGENT_TRAYICON_DECODER_OPEN 10001
++#define WM_AGENT_TRAYICON_DECODER_CLOSE 10002
++#define WM_AGENT_TRAYICON_DECODER_START 10003
++#define WM_AGENT_TRAYICON_DECODER_STOP 10004
++#define WM_AGENT_TRAYICON_DECODER_RUN 10005
++#define WM_AGENT_TRAYICON_DECODER_PAUSE 10006
++
++
++#endif /* _BC_DTS_DEFS_H_ */
+diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+new file mode 100644
+index 0000000..b3125e3
+--- /dev/null
++++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+@@ -0,0 +1,299 @@
++/********************************************************************
++ * Copyright(c) 2006-2009 Broadcom Corporation.
++ *
++ * Name: bc_dts_glob_lnx.h
++ *
++ * Description: Wrapper to Windows dts_glob.h for Link-Linux usage.
++ * The idea is to define additional Linux related defs
++ * in this file to avoid changes to existing Windows
++ * glob file.
++ *
++ * AU
++ *
++ * HISTORY:
++ *
++ ********************************************************************
++ * This header is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as published
++ * by the Free Software Foundation, either version 2.1 of the License.
++ *
++ * This header is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU Lesser General Public License for more details.
++ * You should have received a copy of the GNU Lesser General Public License
++ * along with this header. If not, see <http://www.gnu.org/licenses/>.
++ *******************************************************************/
++
++#ifndef _BC_DTS_GLOB_LNX_H_
++#define _BC_DTS_GLOB_LNX_H_
++
++#ifdef __LINUX_USER__
++#include <stdio.h>
++#include <stdlib.h>
++#include <unistd.h>
++#include <fcntl.h>
++#include <ctype.h>
++#include <string.h>
++#include <errno.h>
++#include <netdb.h>
++#include <sys/time.h>
++#include <time.h>
++#include <arpa/inet.h>
++#include <asm/param.h>
++#include <linux/ioctl.h>
++#include <sys/select.h>
++
++#define DRVIFLIB_INT_API
++
++#endif
++
++#include "bc_dts_defs.h"
++#include "bcm_70012_regs.h" /* Link Register defs */
++
++#define CRYSTALHD_API_NAME "crystalhd"
++#define CRYSTALHD_API_DEV_NAME "/dev/crystalhd"
++
++/*
++ * These are SW stack tunable parameters shared
++ * between the driver and the application.
++ */
++enum _BC_DTS_GLOBALS {
++ BC_MAX_FW_CMD_BUFF_SZ = 0x40, /* FW passthrough cmd/rsp buffer size */
++ PCI_CFG_SIZE = 256, /* PCI config size buffer */
++ BC_IOCTL_DATA_POOL_SIZE = 8, /* BC_IOCTL_DATA Pool size */
++ BC_LINK_MAX_OPENS = 3, /* Maximum simultaneous opens*/
++ BC_LINK_MAX_SGLS = 1024, /* Maximum SG elements 4M/4K */
++ BC_TX_LIST_CNT = 2, /* Max Tx DMA Rings */
++ BC_RX_LIST_CNT = 8, /* Max Rx DMA Rings*/
++ BC_PROC_OUTPUT_TIMEOUT = 3000, /* Milliseconds */
++ BC_INFIFO_THRESHOLD = 0x10000,
++};
++
++typedef struct _BC_CMD_REG_ACC {
++ uint32_t Offset;
++ uint32_t Value;
++} BC_CMD_REG_ACC;
++
++typedef struct _BC_CMD_DEV_MEM {
++ uint32_t StartOff;
++ uint32_t NumDwords;
++ uint32_t Rsrd;
++} BC_CMD_DEV_MEM;
++
++/* FW Passthrough command structure */
++enum _bc_fw_cmd_flags {
++ BC_FW_CMD_FLAGS_NONE = 0,
++ BC_FW_CMD_PIB_QS = 0x01,
++};
++
++typedef struct _BC_FW_CMD {
++ uint32_t cmd[BC_MAX_FW_CMD_BUFF_SZ];
++ uint32_t rsp[BC_MAX_FW_CMD_BUFF_SZ];
++ uint32_t flags;
++ uint32_t add_data;
++} BC_FW_CMD, *PBC_FW_CMD;
++
++typedef struct _BC_HW_TYPE {
++ uint16_t PciDevId;
++ uint16_t PciVenId;
++ uint8_t HwRev;
++ uint8_t Align[3];
++} BC_HW_TYPE;
++
++typedef struct _BC_PCI_CFG {
++ uint32_t Size;
++ uint32_t Offset;
++ uint8_t pci_cfg_space[PCI_CFG_SIZE];
++} BC_PCI_CFG;
++
++typedef struct _BC_VERSION_INFO_ {
++ uint8_t DriverMajor;
++ uint8_t DriverMinor;
++ uint16_t DriverRevision;
++} BC_VERSION_INFO;
++
++typedef struct _BC_START_RX_CAP_ {
++ uint32_t Rsrd;
++ uint32_t StartDeliveryThsh;
++ uint32_t PauseThsh;
++ uint32_t ResumeThsh;
++} BC_START_RX_CAP;
++
++typedef struct _BC_FLUSH_RX_CAP_ {
++ uint32_t Rsrd;
++ uint32_t bDiscardOnly;
++} BC_FLUSH_RX_CAP;
++
++typedef struct _BC_DTS_STATS {
++ uint8_t drvRLL;
++ uint8_t drvFLL;
++ uint8_t eosDetected;
++ uint8_t pwr_state_change;
++
++ /* Stats from App */
++ uint32_t opFrameDropped;
++ uint32_t opFrameCaptured;
++ uint32_t ipSampleCnt;
++ uint64_t ipTotalSize;
++ uint32_t reptdFrames;
++ uint32_t pauseCount;
++ uint32_t pibMisses;
++ uint32_t discCounter;
++
++ /* Stats from Driver */
++ uint32_t TxFifoBsyCnt;
++ uint32_t intCount;
++ uint32_t DrvIgnIntrCnt;
++ uint32_t DrvTotalFrmDropped;
++ uint32_t DrvTotalHWErrs;
++ uint32_t DrvTotalPIBFlushCnt;
++ uint32_t DrvTotalFrmCaptured;
++ uint32_t DrvPIBMisses;
++ uint32_t DrvPauseTime;
++ uint32_t DrvRepeatedFrms;
++ uint32_t res1[13];
++
++} BC_DTS_STATS;
++
++typedef struct _BC_PROC_INPUT_ {
++ uint8_t *pDmaBuff;
++ uint32_t BuffSz;
++ uint8_t Mapped;
++ uint8_t Encrypted;
++ uint8_t Rsrd[2];
++ uint32_t DramOffset; /* For debug use only */
++} BC_PROC_INPUT, *PBC_PROC_INPUT;
++
++typedef struct _BC_DEC_YUV_BUFFS {
++ uint32_t b422Mode;
++ uint8_t *YuvBuff;
++ uint32_t YuvBuffSz;
++ uint32_t UVbuffOffset;
++ uint32_t YBuffDoneSz;
++ uint32_t UVBuffDoneSz;
++ uint32_t RefCnt;
++} BC_DEC_YUV_BUFFS;
++
++enum _DECOUT_COMPLETION_FLAGS{
++ COMP_FLAG_NO_INFO = 0x00,
++ COMP_FLAG_FMT_CHANGE = 0x01,
++ COMP_FLAG_PIB_VALID = 0x02,
++ COMP_FLAG_DATA_VALID = 0x04,
++ COMP_FLAG_DATA_ENC = 0x08,
++ COMP_FLAG_DATA_BOT = 0x10,
++};
++
++typedef struct _BC_DEC_OUT_BUFF{
++ BC_DEC_YUV_BUFFS OutPutBuffs;
++ BC_PIC_INFO_BLOCK PibInfo;
++ uint32_t Flags;
++ uint32_t BadFrCnt;
++} BC_DEC_OUT_BUFF;
++
++typedef struct _BC_NOTIFY_MODE {
++ uint32_t Mode;
++ uint32_t Rsvr[3];
++} BC_NOTIFY_MODE;
++
++typedef struct _BC_CLOCK {
++ uint32_t clk;
++ uint32_t Rsvr[3];
++} BC_CLOCK;
++
++typedef struct _BC_IOCTL_DATA {
++ BC_STATUS RetSts;
++ uint32_t IoctlDataSz;
++ uint32_t Timeout;
++ union {
++ BC_CMD_REG_ACC regAcc;
++ BC_CMD_DEV_MEM devMem;
++ BC_FW_CMD fwCmd;
++ BC_HW_TYPE hwType;
++ BC_PCI_CFG pciCfg;
++ BC_VERSION_INFO VerInfo;
++ BC_PROC_INPUT ProcInput;
++ BC_DEC_YUV_BUFFS RxBuffs;
++ BC_DEC_OUT_BUFF DecOutData;
++ BC_START_RX_CAP RxCap;
++ BC_FLUSH_RX_CAP FlushRxCap;
++ BC_DTS_STATS drvStat;
++ BC_NOTIFY_MODE NotifyMode;
++ BC_CLOCK clockValue;
++ } u;
++ struct _BC_IOCTL_DATA *next;
++} BC_IOCTL_DATA;
++
++typedef enum _BC_DRV_CMD{
++ DRV_CMD_VERSION = 0, /* Get SW version */
++ DRV_CMD_GET_HWTYPE, /* Get HW version and type Dozer/Tank */
++ DRV_CMD_REG_RD, /* Read Device Register */
++ DRV_CMD_REG_WR, /* Write Device Register */
++ DRV_CMD_FPGA_RD, /* Read FPGA Register */
++ DRV_CMD_FPGA_WR, /* Write FPGA Register */
++ DRV_CMD_MEM_RD, /* Read Device Memory */
++ DRV_CMD_MEM_WR, /* Write Device Memory */
++ DRV_CMD_RD_PCI_CFG, /* Read PCI Config Space */
++ DRV_CMD_WR_PCI_CFG, /* Write the PCI Configuration Space*/
++ DRV_CMD_FW_DOWNLOAD, /* Download Firmware */
++ DRV_ISSUE_FW_CMD, /* Issue FW Cmd (pass through mode) */
++ DRV_CMD_PROC_INPUT, /* Process Input Sample */
++ DRV_CMD_ADD_RXBUFFS, /* Add Rx side buffers to driver pool */
++ DRV_CMD_FETCH_RXBUFF, /* Get Rx DMAed buffer */
++ DRV_CMD_START_RX_CAP, /* Start Rx Buffer Capture */
++ DRV_CMD_FLUSH_RX_CAP, /* Stop the capture for now...we will enhance this later*/
++ DRV_CMD_GET_DRV_STAT, /* Get Driver Internal Statistics */
++ DRV_CMD_RST_DRV_STAT, /* Reset Driver Internal Statistics */
++ DRV_CMD_NOTIFY_MODE, /* Notify the Mode to driver in which the application is Operating*/
++ DRV_CMD_CHANGE_CLOCK, /* Change the core clock to either save power or improve performance */
++
++ /* MUST be the last one.. */
++ DRV_CMD_END, /* End of the List.. */
++} BC_DRV_CMD;
++
++#define BC_IOC_BASE 'b'
++#define BC_IOC_VOID _IOC_NONE
++#define BC_IOC_IOWR(nr, type) _IOWR(BC_IOC_BASE, nr, type)
++#define BC_IOCTL_MB BC_IOCTL_DATA
++
++#define BCM_IOC_GET_VERSION BC_IOC_IOWR(DRV_CMD_VERSION, BC_IOCTL_MB)
++#define BCM_IOC_GET_HWTYPE BC_IOC_IOWR(DRV_CMD_GET_HWTYPE, BC_IOCTL_MB)
++#define BCM_IOC_REG_RD BC_IOC_IOWR(DRV_CMD_REG_RD, BC_IOCTL_MB)
++#define BCM_IOC_REG_WR BC_IOC_IOWR(DRV_CMD_REG_WR, BC_IOCTL_MB)
++#define BCM_IOC_MEM_RD BC_IOC_IOWR(DRV_CMD_MEM_RD, BC_IOCTL_MB)
++#define BCM_IOC_MEM_WR BC_IOC_IOWR(DRV_CMD_MEM_WR, BC_IOCTL_MB)
++#define BCM_IOC_FPGA_RD BC_IOC_IOWR(DRV_CMD_FPGA_RD, BC_IOCTL_MB)
++#define BCM_IOC_FPGA_WR BC_IOC_IOWR(DRV_CMD_FPGA_WR, BC_IOCTL_MB)
++#define BCM_IOC_RD_PCI_CFG BC_IOC_IOWR(DRV_CMD_RD_PCI_CFG, BC_IOCTL_MB)
++#define BCM_IOC_WR_PCI_CFG BC_IOC_IOWR(DRV_CMD_WR_PCI_CFG, BC_IOCTL_MB)
++#define BCM_IOC_PROC_INPUT BC_IOC_IOWR(DRV_CMD_PROC_INPUT, BC_IOCTL_MB)
++#define BCM_IOC_ADD_RXBUFFS BC_IOC_IOWR(DRV_CMD_ADD_RXBUFFS, BC_IOCTL_MB)
++#define BCM_IOC_FETCH_RXBUFF BC_IOC_IOWR(DRV_CMD_FETCH_RXBUFF, BC_IOCTL_MB)
++#define BCM_IOC_FW_CMD BC_IOC_IOWR(DRV_ISSUE_FW_CMD, BC_IOCTL_MB)
++#define BCM_IOC_START_RX_CAP BC_IOC_IOWR(DRV_CMD_START_RX_CAP, BC_IOCTL_MB)
++#define BCM_IOC_FLUSH_RX_CAP BC_IOC_IOWR(DRV_CMD_FLUSH_RX_CAP, BC_IOCTL_MB)
++#define BCM_IOC_GET_DRV_STAT BC_IOC_IOWR(DRV_CMD_GET_DRV_STAT, BC_IOCTL_MB)
++#define BCM_IOC_RST_DRV_STAT BC_IOC_IOWR(DRV_CMD_RST_DRV_STAT, BC_IOCTL_MB)
++#define BCM_IOC_NOTIFY_MODE BC_IOC_IOWR(DRV_CMD_NOTIFY_MODE, BC_IOCTL_MB)
++#define BCM_IOC_FW_DOWNLOAD BC_IOC_IOWR(DRV_CMD_FW_DOWNLOAD, BC_IOCTL_MB)
++#define BCM_IOC_CHG_CLK BC_IOC_IOWR(DRV_CMD_CHANGE_CLOCK, BC_IOCTL_MB)
++#define BCM_IOC_END BC_IOC_VOID
++
++/* Wrapper for main IOCTL data */
++typedef struct _crystalhd_ioctl_data {
++ BC_IOCTL_DATA udata; /* IOCTL from App..*/
++ uint32_t u_id; /* Driver specific user ID */
++ uint32_t cmd; /* Cmd ID for driver's use. */
++ void *add_cdata; /* Additional command specific data..*/
++ uint32_t add_cdata_sz; /* Additional command specific data size */
++ struct _crystalhd_ioctl_data *next; /* List/Fifo management */
++} crystalhd_ioctl_data;
++
++
++enum _crystalhd_kmod_ver{
++ crystalhd_kmod_major = 0,
++ crystalhd_kmod_minor = 9,
++ crystalhd_kmod_rev = 27,
++};
++
++#endif
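Everything userspace needs to talk to the character device is in the header above: a single BC_IOCTL_DATA envelope, a per-command union, and _IOWR numbers built from BC_IOC_BASE. Below is a hypothetical sketch of the simplest round trip, querying the driver version, assuming it is built with -D__LINUX_USER__ and with the patch's headers on the include path; real applications normally go through Broadcom's userspace library rather than raw ioctls, so this is only to illustrate the plumbing defined here.

/* Illustration only; not part of the patch. Build with -D__LINUX_USER__. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "bc_dts_glob_lnx.h"	/* BC_IOCTL_DATA, BCM_IOC_GET_VERSION, ... */

int main(void)
{
	BC_IOCTL_DATA io;
	int fd = open(CRYSTALHD_API_DEV_NAME, O_RDWR);	/* "/dev/crystalhd" */

	if (fd < 0) {
		perror("open " CRYSTALHD_API_DEV_NAME);
		return 1;
	}

	memset(&io, 0, sizeof(io));
	if (ioctl(fd, BCM_IOC_GET_VERSION, &io) < 0) {
		perror("BCM_IOC_GET_VERSION");
		close(fd);
		return 1;
	}

	/* The envelope also carries the driver's own status code. */
	printf("crystalhd driver %d.%d.%d (RetSts=%d)\n",
	       io.u.VerInfo.DriverMajor, io.u.VerInfo.DriverMinor,
	       io.u.VerInfo.DriverRevision, io.RetSts);
	close(fd);
	return 0;
}

Every DRV_CMD_* listed above follows the same shape, with its payload carried in the corresponding member of the u union.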
+diff --git a/drivers/staging/crystalhd/bc_dts_types.h b/drivers/staging/crystalhd/bc_dts_types.h
+new file mode 100644
+index 0000000..ac0c817
+--- /dev/null
++++ b/drivers/staging/crystalhd/bc_dts_types.h
+@@ -0,0 +1,121 @@
++/********************************************************************
++ * Copyright(c) 2006-2009 Broadcom Corporation.
++ *
++ * Name: bc_dts_types.h
++ *
++ * Description: Data types
++ *
++ * AU
++ *
++ * HISTORY:
++ *
++ ********************************************************************
++ * This header is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as published
++ * by the Free Software Foundation, either version 2.1 of the License.
++ *
++ * This header is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU Lesser General Public License for more details.
++ * You should have received a copy of the GNU Lesser General Public License
++ * along with this header. If not, see <http://www.gnu.org/licenses/>.
++ *******************************************************************/
++
++#ifndef _BC_DTS_TYPES_H_
++#define _BC_DTS_TYPES_H_
++
++#ifdef __LINUX_USER__ // Don't include these for KERNEL..
++#include <stdint.h>
++#endif
++
++#if defined(_WIN64) || defined(_WIN32)
++typedef uint32_t U32;
++typedef int32_t S32;
++typedef uint16_t U16;
++typedef int16_t S16;
++typedef unsigned char U8;
++typedef char S8;
++#endif
++
++#ifndef PVOID
++typedef void *PVOID;
++#endif
++
++#ifndef BOOL
++typedef int BOOL;
++#endif
++
++#ifdef WIN32
++ typedef unsigned __int64 U64;
++#elif defined(_WIN64)
++ typedef uint64_t U64;
++#endif
++
++#ifdef _WIN64
++#if !(defined(POINTER_32))
++#define POINTER_32 __ptr32
++#endif
++#else /* _WIN32 */
++#define POINTER_32
++#endif
++
++#if defined(__KERNEL__) || defined(__LINUX_USER__)
++
++#ifdef __LINUX_USER__ /* Don't include these for KERNEL */
++typedef uint32_t ULONG;
++typedef int32_t LONG;
++typedef void *HANDLE;
++#ifndef VOID
++typedef void VOID;
++#endif
++typedef void *LPVOID;
++typedef uint32_t DWORD;
++typedef uint32_t UINT32;
++typedef uint32_t *LPDWORD;
++typedef unsigned char *PUCHAR;
++
++#ifndef TRUE
++ #define TRUE 1
++#endif
++
++#ifndef FALSE
++ #define FALSE 0
++#endif
++
++#define TEXT
++
++#else
++
++/* For Kernel usage.. */
++typedef bool bc_bool_t;
++#endif
++
++#else
++
++#ifndef uint64_t
++typedef struct _uint64_t {
++ uint32_t low_dw;
++ uint32_t hi_dw;
++} uint64_t;
++#endif
++
++#ifndef int32_t
++typedef signed long int32_t;
++#endif
++
++#ifndef uint32_t
++typedef unsigned long uint32_t;
++#endif
++
++#ifndef uint16_t
++typedef unsigned short uint16_t;
++#endif
++
++#ifndef uint8_t
++typedef unsigned char uint8_t;
++#endif
++#endif
++
++#endif
++
+diff --git a/drivers/staging/crystalhd/bcm_70012_regs.h b/drivers/staging/crystalhd/bcm_70012_regs.h
+new file mode 100644
+index 0000000..6922f54
+--- /dev/null
++++ b/drivers/staging/crystalhd/bcm_70012_regs.h
+@@ -0,0 +1,757 @@
++/***************************************************************************
++ * Copyright (c) 1999-2009, Broadcom Corporation.
++ *
++ * Name: bcm_70012_regs.h
++ *
++ * Description: BCM70012 registers
++ *
++ ********************************************************************
++ * This header is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as published
++ * by the Free Software Foundation, either version 2.1 of the License.
++ *
++ * This header is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU Lesser General Public License for more details.
++ * You should have received a copy of the GNU Lesser General Public License
++ * along with this header. If not, see <http://www.gnu.org/licenses/>.
++ ***************************************************************************/
++
++#ifndef MACFILE_H__
++#define MACFILE_H__
++
++/**
++ * m = memory, c = core, r = register, f = field, d = data.
++ */
++#if !defined(GET_FIELD) && !defined(SET_FIELD)
++#define BRCM_ALIGN(c,r,f) c##_##r##_##f##_ALIGN
++#define BRCM_BITS(c,r,f) c##_##r##_##f##_BITS
++#define BRCM_MASK(c,r,f) c##_##r##_##f##_MASK
++#define BRCM_SHIFT(c,r,f) c##_##r##_##f##_SHIFT
++
++#define GET_FIELD(m,c,r,f) \
++ ((((m) & BRCM_MASK(c,r,f)) >> BRCM_SHIFT(c,r,f)) << BRCM_ALIGN(c,r,f))
++
++#define SET_FIELD(m,c,r,f,d) \
++ ((m) = (((m) & ~BRCM_MASK(c,r,f)) | ((((d) >> BRCM_ALIGN(c,r,f)) << \
++ BRCM_SHIFT(c,r,f)) & BRCM_MASK(c,r,f))) \
++ )
++
++#define SET_TYPE_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,c##_##d)
++#define SET_NAME_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,c##_##r##_##f##_##d)
++#define SET_VALUE_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,d)
++
++#endif /* GET & SET */
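The accessor macros above build the *_ALIGN, *_BITS, *_MASK and *_SHIFT names by token-pasting the core, register and field arguments. A small hedged illustration of the calling convention follows, using invented DEMO_CTRL_MODE definitions rather than any real BCM70012 field; it is not part of the patch.

/* Hypothetical 2-bit field at bits 5:4, defined only for this illustration. */
#define DEMO_CTRL_MODE_ALIGN 0
#define DEMO_CTRL_MODE_BITS  2
#define DEMO_CTRL_MODE_MASK  0x00000030
#define DEMO_CTRL_MODE_SHIFT 4

static unsigned int demo_set_mode(unsigned int reg, unsigned int mode)
{
	SET_FIELD(reg, DEMO, CTRL, MODE, mode);	/* place 'mode' into bits 5:4 */
	return reg;
}

static unsigned int demo_get_mode(unsigned int reg)
{
	return GET_FIELD(reg, DEMO, CTRL, MODE);	/* pull bits 5:4 back out */
}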
++
++/****************************************************************************
++ * Core Enums.
++ ***************************************************************************/
++/****************************************************************************
++ * Enums: AES_RGR_BRIDGE_RESET_CTRL
++ ***************************************************************************/
++#define AES_RGR_BRIDGE_RESET_CTRL_DEASSERT 0
++#define AES_RGR_BRIDGE_RESET_CTRL_ASSERT 1
++
++/****************************************************************************
++ * Enums: CCE_RGR_BRIDGE_RESET_CTRL
++ ***************************************************************************/
++#define CCE_RGR_BRIDGE_RESET_CTRL_DEASSERT 0
++#define CCE_RGR_BRIDGE_RESET_CTRL_ASSERT 1
++
++/****************************************************************************
++ * Enums: DBU_RGR_BRIDGE_RESET_CTRL
++ ***************************************************************************/
++#define DBU_RGR_BRIDGE_RESET_CTRL_DEASSERT 0
++#define DBU_RGR_BRIDGE_RESET_CTRL_ASSERT 1
++
++/****************************************************************************
++ * Enums: DCI_RGR_BRIDGE_RESET_CTRL
++ ***************************************************************************/
++#define DCI_RGR_BRIDGE_RESET_CTRL_DEASSERT 0
++#define DCI_RGR_BRIDGE_RESET_CTRL_ASSERT 1
++
++/****************************************************************************
++ * Enums: GISB_ARBITER_DEASSERT_ASSERT
++ ***************************************************************************/
++#define GISB_ARBITER_DEASSERT_ASSERT_DEASSERT 0
++#define GISB_ARBITER_DEASSERT_ASSERT_ASSERT 1
++
++/****************************************************************************
++ * Enums: GISB_ARBITER_UNMASK_MASK
++ ***************************************************************************/
++#define GISB_ARBITER_UNMASK_MASK_UNMASK 0
++#define GISB_ARBITER_UNMASK_MASK_MASK 1
++
++/****************************************************************************
++ * Enums: GISB_ARBITER_DISABLE_ENABLE
++ ***************************************************************************/
++#define GISB_ARBITER_DISABLE_ENABLE_DISABLE 0
++#define GISB_ARBITER_DISABLE_ENABLE_ENABLE 1
++
++/****************************************************************************
++ * Enums: I2C_GR_BRIDGE_RESET_CTRL
++ ***************************************************************************/
++#define I2C_GR_BRIDGE_RESET_CTRL_DEASSERT 0
++#define I2C_GR_BRIDGE_RESET_CTRL_ASSERT 1
++
++/****************************************************************************
++ * Enums: MISC_GR_BRIDGE_RESET_CTRL
++ ***************************************************************************/
++#define MISC_GR_BRIDGE_RESET_CTRL_DEASSERT 0
++#define MISC_GR_BRIDGE_RESET_CTRL_ASSERT 1
++
++/****************************************************************************
++ * Enums: OTP_GR_BRIDGE_RESET_CTRL
++ ***************************************************************************/
++#define OTP_GR_BRIDGE_RESET_CTRL_DEASSERT 0
++#define OTP_GR_BRIDGE_RESET_CTRL_ASSERT 1
++
++/****************************************************************************
++ * BCM70012_TGT_TOP_PCIE_CFG
++ ***************************************************************************/
++#define PCIE_CFG_DEVICE_VENDOR_ID 0x00000000 /* DEVICE_VENDOR_ID Register */
++#define PCIE_CFG_STATUS_COMMAND 0x00000004 /* STATUS_COMMAND Register */
++#define PCIE_CFG_PCI_CLASSCODE_AND_REVISION_ID 0x00000008 /* PCI_CLASSCODE_AND_REVISION_ID Register */
++#define PCIE_CFG_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE_SIZE 0x0000000c /* BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE_SIZE Register */
++#define PCIE_CFG_BASE_ADDRESS_1 0x00000010 /* BASE_ADDRESS_1 Register */
++#define PCIE_CFG_BASE_ADDRESS_2 0x00000014 /* BASE_ADDRESS_2 Register */
++#define PCIE_CFG_BASE_ADDRESS_3 0x00000018 /* BASE_ADDRESS_3 Register */
++#define PCIE_CFG_BASE_ADDRESS_4 0x0000001c /* BASE_ADDRESS_4 Register */
++#define PCIE_CFG_CARDBUS_CIS_POINTER 0x00000028 /* CARDBUS_CIS_POINTER Register */
++#define PCIE_CFG_SUBSYSTEM_DEVICE_VENDOR_ID 0x0000002c /* SUBSYSTEM_DEVICE_VENDOR_ID Register */
++#define PCIE_CFG_EXPANSION_ROM_BASE_ADDRESS 0x00000030 /* EXPANSION_ROM_BASE_ADDRESS Register */
++#define PCIE_CFG_CAPABILITIES_POINTER 0x00000034 /* CAPABILITIES_POINTER Register */
++#define PCIE_CFG_INTERRUPT 0x0000003c /* INTERRUPT Register */
++#define PCIE_CFG_VPD_CAPABILITIES 0x00000040 /* VPD_CAPABILITIES Register */
++#define PCIE_CFG_VPD_DATA 0x00000044 /* VPD_DATA Register */
++#define PCIE_CFG_POWER_MANAGEMENT_CAPABILITY 0x00000048 /* POWER_MANAGEMENT_CAPABILITY Register */
++#define PCIE_CFG_POWER_MANAGEMENT_CONTROL_STATUS 0x0000004c /* POWER_MANAGEMENT_CONTROL_STATUS Register */
++#define PCIE_CFG_MSI_CAPABILITY_HEADER 0x00000050 /* MSI_CAPABILITY_HEADER Register */
++#define PCIE_CFG_MSI_LOWER_ADDRESS 0x00000054 /* MSI_LOWER_ADDRESS Register */
++#define PCIE_CFG_MSI_UPPER_ADDRESS_REGISTER 0x00000058 /* MSI_UPPER_ADDRESS_REGISTER Register */
++#define PCIE_CFG_MSI_DATA 0x0000005c /* MSI_DATA Register */
++#define PCIE_CFG_BROADCOM_VENDOR_SPECIFIC_CAPABILITY_HEADER 0x00000060 /* BROADCOM_VENDOR_SPECIFIC_CAPABILITY_HEADER Register */
++#define PCIE_CFG_RESET_COUNTERS_INITIAL_VALUES 0x00000064 /* RESET_COUNTERS_INITIAL_VALUES Register */
++#define PCIE_CFG_MISCELLANEOUS_HOST_CONTROL 0x00000068 /* MISCELLANEOUS_HOST_CONTROL Register */
++#define PCIE_CFG_SPARE 0x0000006c /* SPARE Register */
++#define PCIE_CFG_PCI_STATE 0x00000070 /* PCI_STATE Register */
++#define PCIE_CFG_CLOCK_CONTROL 0x00000074 /* CLOCK_CONTROL Register */
++#define PCIE_CFG_REGISTER_BASE 0x00000078 /* REGISTER_BASE Register */
++#define PCIE_CFG_MEMORY_BASE 0x0000007c /* MEMORY_BASE Register */
++#define PCIE_CFG_REGISTER_DATA 0x00000080 /* REGISTER_DATA Register */
++#define PCIE_CFG_MEMORY_DATA 0x00000084 /* MEMORY_DATA Register */
++#define PCIE_CFG_EXPANSION_ROM_BAR_SIZE 0x00000088 /* EXPANSION_ROM_BAR_SIZE Register */
++#define PCIE_CFG_EXPANSION_ROM_ADDRESS 0x0000008c /* EXPANSION_ROM_ADDRESS Register */
++#define PCIE_CFG_EXPANSION_ROM_DATA 0x00000090 /* EXPANSION_ROM_DATA Register */
++#define PCIE_CFG_VPD_INTERFACE 0x00000094 /* VPD_INTERFACE Register */
++#define PCIE_CFG_UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_UPPER 0x00000098 /* UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_UPPER Register */
++#define PCIE_CFG_UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_LOWER 0x0000009c /* UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_LOWER Register */
++#define PCIE_CFG_UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_UPPER 0x000000a0 /* UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_UPPER Register */
++#define PCIE_CFG_UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_LOWER 0x000000a4 /* UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_LOWER Register */
++#define PCIE_CFG_UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_UPPER 0x000000a8 /* UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_UPPER Register */
++#define PCIE_CFG_UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_LOWER 0x000000ac /* UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_LOWER Register */
++#define PCIE_CFG_INT_MAILBOX_UPPER 0x000000b0 /* INT_MAILBOX_UPPER Register */
++#define PCIE_CFG_INT_MAILBOX_LOWER 0x000000b4 /* INT_MAILBOX_LOWER Register */
++#define PCIE_CFG_PRODUCT_ID_AND_ASIC_REVISION 0x000000bc /* PRODUCT_ID_AND_ASIC_REVISION Register */
++#define PCIE_CFG_FUNCTION_EVENT 0x000000c0 /* FUNCTION_EVENT Register */
++#define PCIE_CFG_FUNCTION_EVENT_MASK 0x000000c4 /* FUNCTION_EVENT_MASK Register */
++#define PCIE_CFG_FUNCTION_PRESENT 0x000000c8 /* FUNCTION_PRESENT Register */
++#define PCIE_CFG_PCIE_CAPABILITIES 0x000000cc /* PCIE_CAPABILITIES Register */
++#define PCIE_CFG_DEVICE_CAPABILITIES 0x000000d0 /* DEVICE_CAPABILITIES Register */
++#define PCIE_CFG_DEVICE_STATUS_CONTROL 0x000000d4 /* DEVICE_STATUS_CONTROL Register */
++#define PCIE_CFG_LINK_CAPABILITY 0x000000d8 /* LINK_CAPABILITY Register */
++#define PCIE_CFG_LINK_STATUS_CONTROL 0x000000dc /* LINK_STATUS_CONTROL Register */
++#define PCIE_CFG_DEVICE_CAPABILITIES_2 0x000000f0 /* DEVICE_CAPABILITIES_2 Register */
++#define PCIE_CFG_DEVICE_STATUS_CONTROL_2 0x000000f4 /* DEVICE_STATUS_CONTROL_2 Register */
++#define PCIE_CFG_LINK_CAPABILITIES_2 0x000000f8 /* LINK_CAPABILITIES_2 Register */
++#define PCIE_CFG_LINK_STATUS_CONTROL_2 0x000000fc /* LINK_STATUS_CONTROL_2 Register */
++#define PCIE_CFG_ADVANCED_ERROR_REPORTING_ENHANCED_CAPABILITY_HEADER 0x00000100 /* ADVANCED_ERROR_REPORTING_ENHANCED_CAPABILITY_HEADER Register */
++#define PCIE_CFG_UNCORRECTABLE_ERROR_STATUS 0x00000104 /* UNCORRECTABLE_ERROR_STATUS Register */
++#define PCIE_CFG_UNCORRECTABLE_ERROR_MASK 0x00000108 /* UNCORRECTABLE_ERROR_MASK Register */
++#define PCIE_CFG_UNCORRECTABLE_ERROR_SEVERITY 0x0000010c /* UNCORRECTABLE_ERROR_SEVERITY Register */
++#define PCIE_CFG_CORRECTABLE_ERROR_STATUS 0x00000110 /* CORRECTABLE_ERROR_STATUS Register */
++#define PCIE_CFG_CORRECTABLE_ERROR_MASK 0x00000114 /* CORRECTABLE_ERROR_MASK Register */
++#define PCIE_CFG_ADVANCED_ERROR_CAPABILITIES_AND_CONTROL 0x00000118 /* ADVANCED_ERROR_CAPABILITIES_AND_CONTROL Register */
++#define PCIE_CFG_HEADER_LOG_1 0x0000011c /* HEADER_LOG_1 Register */
++#define PCIE_CFG_HEADER_LOG_2 0x00000120 /* HEADER_LOG_2 Register */
++#define PCIE_CFG_HEADER_LOG_3 0x00000124 /* HEADER_LOG_3 Register */
++#define PCIE_CFG_HEADER_LOG_4 0x00000128 /* HEADER_LOG_4 Register */
++#define PCIE_CFG_VIRTUAL_CHANNEL_ENHANCED_CAPABILITY_HEADER 0x0000013c /* VIRTUAL_CHANNEL_ENHANCED_CAPABILITY_HEADER Register */
++#define PCIE_CFG_PORT_VC_CAPABILITY 0x00000140 /* PORT_VC_CAPABILITY Register */
++#define PCIE_CFG_PORT_VC_CAPABILITY_2 0x00000144 /* PORT_VC_CAPABILITY_2 Register */
++#define PCIE_CFG_PORT_VC_STATUS_CONTROL 0x00000148 /* PORT_VC_STATUS_CONTROL Register */
++#define PCIE_CFG_VC_RESOURCE_CAPABILITY 0x0000014c /* VC_RESOURCE_CAPABILITY Register */
++#define PCIE_CFG_VC_RESOURCE_CONTROL 0x00000150 /* VC_RESOURCE_CONTROL Register */
++#define PCIE_CFG_VC_RESOURCE_STATUS 0x00000154 /* VC_RESOURCE_STATUS Register */
++#define PCIE_CFG_DEVICE_SERIAL_NO_ENHANCED_CAPABILITY_HEADER 0x00000160 /* DEVICE_SERIAL_NO_ENHANCED_CAPABILITY_HEADER Register */
++#define PCIE_CFG_DEVICE_SERIAL_NO_LOWER_DW 0x00000164 /* DEVICE_SERIAL_NO_LOWER_DW Register */
++#define PCIE_CFG_DEVICE_SERIAL_NO_UPPER_DW 0x00000168 /* DEVICE_SERIAL_NO_UPPER_DW Register */
++#define PCIE_CFG_POWER_BUDGETING_ENHANCED_CAPABILITY_HEADER 0x0000016c /* POWER_BUDGETING_ENHANCED_CAPABILITY_HEADER Register */
++#define PCIE_CFG_POWER_BUDGETING_DATA_SELECT 0x00000170 /* POWER_BUDGETING_DATA_SELECT Register */
++#define PCIE_CFG_POWER_BUDGETING_DATA 0x00000174 /* POWER_BUDGETING_DATA Register */
++#define PCIE_CFG_POWER_BUDGETING_CAPABILITY 0x00000178 /* POWER_BUDGETING_CAPABILITY Register */
++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_2_1 0x0000017c /* FIRMWARE_POWER_BUDGETING_2_1 Register */
++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_4_3 0x00000180 /* FIRMWARE_POWER_BUDGETING_4_3 Register */
++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_6_5 0x00000184 /* FIRMWARE_POWER_BUDGETING_6_5 Register */
++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_8_7 0x00000188 /* FIRMWARE_POWER_BUDGETING_8_7 Register */
++#define PCIE_CFG_PCIE_1_1_ADVISORY_NON_FATAL_ERROR_MASKING 0x0000018c /* PCIE_1_1_ADVISORY_NON_FATAL_ERROR_MASKING Register */
++
++
++/****************************************************************************
++ * BCM70012_TGT_TOP_PCIE_TL
++ ***************************************************************************/
++#define PCIE_TL_TL_CONTROL 0x00000400 /* TL_CONTROL Register */
++#define PCIE_TL_TRANSACTION_CONFIGURATION 0x00000404 /* TRANSACTION_CONFIGURATION Register */
++
++
++/****************************************************************************
++ * BCM70012_TGT_TOP_PCIE_DLL
++ ***************************************************************************/
++#define PCIE_DLL_DATA_LINK_CONTROL 0x00000500 /* DATA_LINK_CONTROL Register */
++#define PCIE_DLL_DATA_LINK_STATUS 0x00000504 /* DATA_LINK_STATUS Register */
++
++
++/****************************************************************************
++ * BCM70012_TGT_TOP_INTR
++ ***************************************************************************/
++#define INTR_INTR_STATUS 0x00000700 /* Interrupt Status Register */
++#define INTR_INTR_SET 0x00000704 /* Interrupt Set Register */
++#define INTR_INTR_CLR_REG 0x00000708 /* Interrupt Clear Register */
++#define INTR_INTR_MSK_STS_REG 0x0000070c /* Interrupt Mask Status Register */
++#define INTR_INTR_MSK_SET_REG 0x00000710 /* Interrupt Mask Set Register */
++#define INTR_INTR_MSK_CLR_REG 0x00000714 /* Interrupt Mask Clear Register */
++#define INTR_EOI_CTRL 0x00000720 /* End of interrupt control register */
++
++
++/****************************************************************************
++ * BCM70012_MISC_TOP_MISC1
++ ***************************************************************************/
++#define MISC1_TX_FIRST_DESC_L_ADDR_LIST0 0x00000c00 /* Tx DMA Descriptor List0 First Descriptor lower Address */
++#define MISC1_TX_FIRST_DESC_U_ADDR_LIST0 0x00000c04 /* Tx DMA Descriptor List0 First Descriptor Upper Address */
++#define MISC1_TX_FIRST_DESC_L_ADDR_LIST1 0x00000c08 /* Tx DMA Descriptor List1 First Descriptor Lower Address */
++#define MISC1_TX_FIRST_DESC_U_ADDR_LIST1 0x00000c0c /* Tx DMA Descriptor List1 First Descriptor Upper Address */
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS 0x00000c10 /* Tx DMA Software Descriptor List Control and Status */
++#define MISC1_TX_DMA_ERROR_STATUS 0x00000c18 /* Tx DMA Engine Error Status */
++#define MISC1_TX_DMA_LIST0_CUR_DESC_L_ADDR 0x00000c1c /* Tx DMA List0 Current Descriptor Lower Address */
++#define MISC1_TX_DMA_LIST0_CUR_DESC_U_ADDR 0x00000c20 /* Tx DMA List0 Current Descriptor Upper Address */
++#define MISC1_TX_DMA_LIST0_CUR_BYTE_CNT_REM 0x00000c24 /* Tx DMA List0 Current Descriptor Byte Count Remaining */
++#define MISC1_TX_DMA_LIST1_CUR_DESC_L_ADDR 0x00000c28 /* Tx DMA List1 Current Descriptor Lower Address */
++#define MISC1_TX_DMA_LIST1_CUR_DESC_U_ADDR 0x00000c2c /* Tx DMA List1 Current Descriptor Upper Address */
++#define MISC1_TX_DMA_LIST1_CUR_BYTE_CNT_REM 0x00000c30 /* Tx DMA List1 Current Descriptor Byte Count Remaining */
++#define MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0 0x00000c34 /* Y Rx Descriptor List0 First Descriptor Lower Address */
++#define MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0 0x00000c38 /* Y Rx Descriptor List0 First Descriptor Upper Address */
++#define MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1 0x00000c3c /* Y Rx Descriptor List1 First Descriptor Lower Address */
++#define MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1 0x00000c40 /* Y Rx Descriptor List1 First Descriptor Upper Address */
++#define MISC1_Y_RX_SW_DESC_LIST_CTRL_STS 0x00000c44 /* Y Rx Software Descriptor List Control and Status */
++#define MISC1_Y_RX_ERROR_STATUS 0x00000c4c /* Y Rx Engine Error Status */
++#define MISC1_Y_RX_LIST0_CUR_DESC_L_ADDR 0x00000c50 /* Y Rx List0 Current Descriptor Lower Address */
++#define MISC1_Y_RX_LIST0_CUR_DESC_U_ADDR 0x00000c54 /* Y Rx List0 Current Descriptor Upper Address */
++#define MISC1_Y_RX_LIST0_CUR_BYTE_CNT 0x00000c58 /* Y Rx List0 Current Descriptor Byte Count */
++#define MISC1_Y_RX_LIST1_CUR_DESC_L_ADDR 0x00000c5c /* Y Rx List1 Current Descriptor Lower address */
++#define MISC1_Y_RX_LIST1_CUR_DESC_U_ADDR 0x00000c60 /* Y Rx List1 Current Descriptor Upper address */
++#define MISC1_Y_RX_LIST1_CUR_BYTE_CNT 0x00000c64 /* Y Rx List1 Current Descriptor Byte Count */
++#define MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0 0x00000c68 /* UV Rx Descriptor List0 First Descriptor lower Address */
++#define MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0 0x00000c6c /* UV Rx Descriptor List0 First Descriptor Upper Address */
++#define MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1 0x00000c70 /* UV Rx Descriptor List1 First Descriptor Lower Address */
++#define MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1 0x00000c74 /* UV Rx Descriptor List1 First Descriptor Upper Address */
++#define MISC1_UV_RX_SW_DESC_LIST_CTRL_STS 0x00000c78 /* UV Rx Software Descriptor List Control and Status */
++#define MISC1_UV_RX_ERROR_STATUS 0x00000c7c /* UV Rx Engine Error Status */
++#define MISC1_UV_RX_LIST0_CUR_DESC_L_ADDR 0x00000c80 /* UV Rx List0 Current Descriptor Lower Address */
++#define MISC1_UV_RX_LIST0_CUR_DESC_U_ADDR 0x00000c84 /* UV Rx List0 Current Descriptor Upper Address */
++#define MISC1_UV_RX_LIST0_CUR_BYTE_CNT 0x00000c88 /* UV Rx List0 Current Descriptor Byte Count */
++#define MISC1_UV_RX_LIST1_CUR_DESC_L_ADDR 0x00000c8c /* UV Rx List1 Current Descriptor Lower Address */
++#define MISC1_UV_RX_LIST1_CUR_DESC_U_ADDR 0x00000c90 /* UV Rx List1 Current Descriptor Upper Address */
++#define MISC1_UV_RX_LIST1_CUR_BYTE_CNT 0x00000c94 /* UV Rx List1 Current Descriptor Byte Count */
++#define MISC1_DMA_DEBUG_OPTIONS_REG 0x00000c98 /* DMA Debug Options Register */
++#define MISC1_READ_CHANNEL_ERROR_STATUS 0x00000c9c /* Read Channel Error Status */
++#define MISC1_PCIE_DMA_CTRL 0x00000ca0 /* PCIE DMA Control Register */
++
++
++/****************************************************************************
++ * BCM70012_MISC_TOP_MISC2
++ ***************************************************************************/
++#define MISC2_GLOBAL_CTRL 0x00000d00 /* Global Control Register */
++#define MISC2_INTERNAL_STATUS 0x00000d04 /* Internal Status Register */
++#define MISC2_INTERNAL_STATUS_MUX_CTRL 0x00000d08 /* Internal Debug Mux Control */
++#define MISC2_DEBUG_FIFO_LENGTH 0x00000d0c /* Debug FIFO Length */
++
++
++/****************************************************************************
++ * BCM70012_MISC_TOP_MISC3
++ ***************************************************************************/
++#define MISC3_RESET_CTRL 0x00000e00 /* Reset Control Register */
++#define MISC3_BIST_CTRL 0x00000e04 /* BIST Control Register */
++#define MISC3_BIST_STATUS 0x00000e08 /* BIST Status Register */
++#define MISC3_RX_CHECKSUM 0x00000e0c /* Receive Checksum */
++#define MISC3_TX_CHECKSUM 0x00000e10 /* Transmit Checksum */
++#define MISC3_ECO_CTRL_CORE 0x00000e14 /* ECO Core Reset Control Register */
++#define MISC3_CSI_TEST_CTRL 0x00000e18 /* CSI Test Control Register */
++#define MISC3_HD_DVI_TEST_CTRL 0x00000e1c /* HD DVI Test Control Register */
++
++
++/****************************************************************************
++ * BCM70012_MISC_TOP_MISC_PERST
++ ***************************************************************************/
++#define MISC_PERST_ECO_CTRL_PERST 0x00000e80 /* ECO PCIE Reset Control Register */
++#define MISC_PERST_DECODER_CTRL 0x00000e84 /* Decoder Control Register */
++#define MISC_PERST_CCE_STATUS 0x00000e88 /* Config Copy Engine Status */
++#define MISC_PERST_PCIE_DEBUG 0x00000e8c /* PCIE Debug Control Register */
++#define MISC_PERST_PCIE_DEBUG_STATUS 0x00000e90 /* PCIE Debug Status Register */
++#define MISC_PERST_VREG_CTRL 0x00000e94 /* Voltage Regulator Control Register */
++#define MISC_PERST_MEM_CTRL 0x00000e98 /* Memory Control Register */
++#define MISC_PERST_CLOCK_CTRL 0x00000e9c /* Clock Control Register */
++
++
++/****************************************************************************
++ * BCM70012_MISC_TOP_GISB_ARBITER
++ ***************************************************************************/
++#define GISB_ARBITER_REVISION 0x00000f00 /* GISB ARBITER REVISION */
++#define GISB_ARBITER_SCRATCH 0x00000f04 /* GISB ARBITER Scratch Register */
++#define GISB_ARBITER_REQ_MASK 0x00000f08 /* GISB ARBITER Master Request Mask Register */
++#define GISB_ARBITER_TIMER 0x00000f0c /* GISB ARBITER Timer Value Register */
++
++
++/****************************************************************************
++ * BCM70012_OTP_TOP_OTP
++ ***************************************************************************/
++#define OTP_CONFIG_INFO 0x00001400 /* OTP Configuration Register */
++#define OTP_CMD 0x00001404 /* OTP Command Register */
++#define OTP_STATUS 0x00001408 /* OTP Status Register */
++#define OTP_CONTENT_MISC 0x0000140c /* Content : Miscellaneous Register */
++#define OTP_CONTENT_AES_0 0x00001410 /* Content : AES Key 0 Register */
++#define OTP_CONTENT_AES_1 0x00001414 /* Content : AES Key 1 Register */
++#define OTP_CONTENT_AES_2 0x00001418 /* Content : AES Key 2 Register */
++#define OTP_CONTENT_AES_3 0x0000141c /* Content : AES Key 3 Register */
++#define OTP_CONTENT_SHA_0 0x00001420 /* Content : SHA Key 0 Register */
++#define OTP_CONTENT_SHA_1 0x00001424 /* Content : SHA Key 1 Register */
++#define OTP_CONTENT_SHA_2 0x00001428 /* Content : SHA Key 2 Register */
++#define OTP_CONTENT_SHA_3 0x0000142c /* Content : SHA Key 3 Register */
++#define OTP_CONTENT_SHA_4 0x00001430 /* Content : SHA Key 4 Register */
++#define OTP_CONTENT_SHA_5 0x00001434 /* Content : SHA Key 5 Register */
++#define OTP_CONTENT_SHA_6 0x00001438 /* Content : SHA Key 6 Register */
++#define OTP_CONTENT_SHA_7 0x0000143c /* Content : SHA Key 7 Register */
++#define OTP_CONTENT_CHECKSUM 0x00001440 /* Content : Checksum Register */
++#define OTP_PROG_CTRL 0x00001444 /* Programming Control Register */
++#define OTP_PROG_STATUS 0x00001448 /* Programming Status Register */
++#define OTP_PROG_PULSE 0x0000144c /* Program Pulse Width Register */
++#define OTP_VERIFY_PULSE 0x00001450 /* Verify Pulse Width Register */
++#define OTP_PROG_MASK 0x00001454 /* Program Mask Register */
++#define OTP_DATA_INPUT 0x00001458 /* Data Input Register */
++#define OTP_DATA_OUTPUT 0x0000145c /* Data Output Register */
++
++
++/****************************************************************************
++ * BCM70012_AES_TOP_AES
++ ***************************************************************************/
++#define AES_CONFIG_INFO 0x00001800 /* AES Configuration Information Register */
++#define AES_CMD 0x00001804 /* AES Command Register */
++#define AES_STATUS 0x00001808 /* AES Status Register */
++#define AES_EEPROM_CONFIG 0x0000180c /* AES EEPROM Configuration Register */
++#define AES_EEPROM_DATA_0 0x00001810 /* AES EEPROM Data Register 0 */
++#define AES_EEPROM_DATA_1 0x00001814 /* AES EEPROM Data Register 1 */
++#define AES_EEPROM_DATA_2 0x00001818 /* AES EEPROM Data Register 2 */
++#define AES_EEPROM_DATA_3 0x0000181c /* AES EEPROM Data Register 3 */
++
++
++/****************************************************************************
++ * BCM70012_DCI_TOP_DCI
++ ***************************************************************************/
++#define DCI_CMD 0x00001c00 /* DCI Command Register */
++#define DCI_STATUS 0x00001c04 /* DCI Status Register */
++#define DCI_DRAM_BASE_ADDR 0x00001c08 /* DRAM Base Address Register */
++#define DCI_FIRMWARE_ADDR 0x00001c0c /* Firmware Address Register */
++#define DCI_FIRMWARE_DATA 0x00001c10 /* Firmware Data Register */
++#define DCI_SIGNATURE_DATA_0 0x00001c14 /* Signature Data Register 0 */
++#define DCI_SIGNATURE_DATA_1 0x00001c18 /* Signature Data Register 1 */
++#define DCI_SIGNATURE_DATA_2 0x00001c1c /* Signature Data Register 2 */
++#define DCI_SIGNATURE_DATA_3 0x00001c20 /* Signature Data Register 3 */
++#define DCI_SIGNATURE_DATA_4 0x00001c24 /* Signature Data Register 4 */
++#define DCI_SIGNATURE_DATA_5 0x00001c28 /* Signature Data Register 5 */
++#define DCI_SIGNATURE_DATA_6 0x00001c2c /* Signature Data Register 6 */
++#define DCI_SIGNATURE_DATA_7 0x00001c30 /* Signature Data Register 7 */
++
++
++/****************************************************************************
++ * BCM70012_TGT_TOP_INTR
++ ***************************************************************************/
++/****************************************************************************
++ * INTR :: INTR_STATUS
++ ***************************************************************************/
++/* INTR :: INTR_STATUS :: reserved0 [31:26] */
++#define INTR_INTR_STATUS_reserved0_MASK 0xfc000000
++#define INTR_INTR_STATUS_reserved0_ALIGN 0
++#define INTR_INTR_STATUS_reserved0_BITS 6
++#define INTR_INTR_STATUS_reserved0_SHIFT 26
++
++/* INTR :: INTR_STATUS :: PCIE_TGT_CA_ATTN [25:25] */
++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_MASK 0x02000000
++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_ALIGN 0
++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_BITS 1
++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_SHIFT 25
++
++/* INTR :: INTR_STATUS :: PCIE_TGT_UR_ATTN [24:24] */
++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_MASK 0x01000000
++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_ALIGN 0
++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_BITS 1
++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_SHIFT 24
++
++/* INTR :: INTR_STATUS :: reserved1 [23:14] */
++#define INTR_INTR_STATUS_reserved1_MASK 0x00ffc000
++#define INTR_INTR_STATUS_reserved1_ALIGN 0
++#define INTR_INTR_STATUS_reserved1_BITS 10
++#define INTR_INTR_STATUS_reserved1_SHIFT 14
++
++/* INTR :: INTR_STATUS :: L1_UV_RX_DMA_ERR_INTR [13:13] */
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_MASK 0x00002000
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_BITS 1
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_SHIFT 13
++
++/* INTR :: INTR_STATUS :: L1_UV_RX_DMA_DONE_INTR [12:12] */
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK 0x00001000
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_BITS 1
++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_SHIFT 12
++
++/* INTR :: INTR_STATUS :: L1_Y_RX_DMA_ERR_INTR [11:11] */
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_MASK 0x00000800
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_BITS 1
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_SHIFT 11
++
++/* INTR :: INTR_STATUS :: L1_Y_RX_DMA_DONE_INTR [10:10] */
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK 0x00000400
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_BITS 1
++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_SHIFT 10
++
++/* INTR :: INTR_STATUS :: L1_TX_DMA_ERR_INTR [09:09] */
++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK 0x00000200
++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_BITS 1
++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_SHIFT 9
++
++/* INTR :: INTR_STATUS :: L1_TX_DMA_DONE_INTR [08:08] */
++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK 0x00000100
++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_BITS 1
++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_SHIFT 8
++
++/* INTR :: INTR_STATUS :: reserved2 [07:06] */
++#define INTR_INTR_STATUS_reserved2_MASK 0x000000c0
++#define INTR_INTR_STATUS_reserved2_ALIGN 0
++#define INTR_INTR_STATUS_reserved2_BITS 2
++#define INTR_INTR_STATUS_reserved2_SHIFT 6
++
++/* INTR :: INTR_STATUS :: L0_UV_RX_DMA_ERR_INTR [05:05] */
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_MASK 0x00000020
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_BITS 1
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_SHIFT 5
++
++/* INTR :: INTR_STATUS :: L0_UV_RX_DMA_DONE_INTR [04:04] */
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK 0x00000010
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_BITS 1
++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_SHIFT 4
++
++/* INTR :: INTR_STATUS :: L0_Y_RX_DMA_ERR_INTR [03:03] */
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_MASK 0x00000008
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_BITS 1
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_SHIFT 3
++
++/* INTR :: INTR_STATUS :: L0_Y_RX_DMA_DONE_INTR [02:02] */
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK 0x00000004
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_BITS 1
++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_SHIFT 2
++
++/* INTR :: INTR_STATUS :: L0_TX_DMA_ERR_INTR [01:01] */
++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK 0x00000002
++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_BITS 1
++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_SHIFT 1
++
++/* INTR :: INTR_STATUS :: L0_TX_DMA_DONE_INTR [00:00] */
++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK 0x00000001
++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_ALIGN 0
++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_BITS 1
++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_SHIFT 0
++
++
++/****************************************************************************
++ * MISC1 :: TX_SW_DESC_LIST_CTRL_STS
++ ***************************************************************************/
++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: reserved0 [31:04] */
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_MASK 0xfffffff0
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_ALIGN 0
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_BITS 28
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_SHIFT 4
++
++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: DMA_DATA_SERV_PTR [03:03] */
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_MASK 0x00000008
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_ALIGN 0
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_BITS 1
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_SHIFT 3
++
++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: DESC_SERV_PTR [02:02] */
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_MASK 0x00000004
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_ALIGN 0
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_BITS 1
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_SHIFT 2
++
++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: TX_DMA_HALT_ON_ERROR [01:01] */
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_MASK 0x00000002
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_ALIGN 0
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_BITS 1
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_SHIFT 1
++
++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: TX_DMA_RUN_STOP [00:00] */
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK 0x00000001
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_ALIGN 0
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_BITS 1
++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_SHIFT 0
++
++
++/****************************************************************************
++ * MISC1 :: TX_DMA_ERROR_STATUS
++ ***************************************************************************/
++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved0 [31:10] */
++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_MASK 0xfffffc00
++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_BITS 22
++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_SHIFT 10
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L1_DESC_TX_ABORT_ERRORS [09:09] */
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK 0x00000200
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_SHIFT 9
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved1 [08:08] */
++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_MASK 0x00000100
++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_SHIFT 8
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L0_DESC_TX_ABORT_ERRORS [07:07] */
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK 0x00000080
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_SHIFT 7
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved2 [06:06] */
++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_MASK 0x00000040
++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_SHIFT 6
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L1_DMA_DATA_TX_ABORT_ERRORS [05:05] */
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK 0x00000020
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_SHIFT 5
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L1_FIFO_FULL_ERRORS [04:04] */
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK 0x00000010
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_SHIFT 4
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved3 [03:03] */
++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_MASK 0x00000008
++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_SHIFT 3
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L0_DMA_DATA_TX_ABORT_ERRORS [02:02] */
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK 0x00000004
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_SHIFT 2
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L0_FIFO_FULL_ERRORS [01:01] */
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK 0x00000002
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_SHIFT 1
++
++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved4 [00:00] */
++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_MASK 0x00000001
++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_ALIGN 0
++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_BITS 1
++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_SHIFT 0
++
++
++/****************************************************************************
++ * MISC1 :: Y_RX_ERROR_STATUS
++ ***************************************************************************/
++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved0 [31:14] */
++#define MISC1_Y_RX_ERROR_STATUS_reserved0_MASK 0xffffc000
++#define MISC1_Y_RX_ERROR_STATUS_reserved0_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_reserved0_BITS 18
++#define MISC1_Y_RX_ERROR_STATUS_reserved0_SHIFT 14
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_UNDERRUN_ERROR [13:13] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK 0x00002000
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_SHIFT 13
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_OVERRUN_ERROR [12:12] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK 0x00001000
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_SHIFT 12
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_UNDERRUN_ERROR [11:11] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK 0x00000800
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_SHIFT 11
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_OVERRUN_ERROR [10:10] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK 0x00000400
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_SHIFT 10
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_DESC_TX_ABORT_ERRORS [09:09] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK 0x00000200
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_SHIFT 9
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved1 [08:08] */
++#define MISC1_Y_RX_ERROR_STATUS_reserved1_MASK 0x00000100
++#define MISC1_Y_RX_ERROR_STATUS_reserved1_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_reserved1_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_reserved1_SHIFT 8
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_DESC_TX_ABORT_ERRORS [07:07] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK 0x00000080
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_SHIFT 7
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved2 [06:05] */
++#define MISC1_Y_RX_ERROR_STATUS_reserved2_MASK 0x00000060
++#define MISC1_Y_RX_ERROR_STATUS_reserved2_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_reserved2_BITS 2
++#define MISC1_Y_RX_ERROR_STATUS_reserved2_SHIFT 5
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_FIFO_FULL_ERRORS [04:04] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK 0x00000010
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_SHIFT 4
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved3 [03:02] */
++#define MISC1_Y_RX_ERROR_STATUS_reserved3_MASK 0x0000000c
++#define MISC1_Y_RX_ERROR_STATUS_reserved3_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_reserved3_BITS 2
++#define MISC1_Y_RX_ERROR_STATUS_reserved3_SHIFT 2
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_FIFO_FULL_ERRORS [01:01] */
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK 0x00000002
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_SHIFT 1
++
++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved4 [00:00] */
++#define MISC1_Y_RX_ERROR_STATUS_reserved4_MASK 0x00000001
++#define MISC1_Y_RX_ERROR_STATUS_reserved4_ALIGN 0
++#define MISC1_Y_RX_ERROR_STATUS_reserved4_BITS 1
++#define MISC1_Y_RX_ERROR_STATUS_reserved4_SHIFT 0
++
++
++/****************************************************************************
++ * MISC1 :: UV_RX_ERROR_STATUS
++ ***************************************************************************/
++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved0 [31:14] */
++#define MISC1_UV_RX_ERROR_STATUS_reserved0_MASK 0xffffc000
++#define MISC1_UV_RX_ERROR_STATUS_reserved0_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_reserved0_BITS 18
++#define MISC1_UV_RX_ERROR_STATUS_reserved0_SHIFT 14
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_UNDERRUN_ERROR [13:13] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK 0x00002000
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_SHIFT 13
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_OVERRUN_ERROR [12:12] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK 0x00001000
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_SHIFT 12
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_UNDERRUN_ERROR [11:11] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK 0x00000800
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_SHIFT 11
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_OVERRUN_ERROR [10:10] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK 0x00000400
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_SHIFT 10
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_DESC_TX_ABORT_ERRORS [09:09] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK 0x00000200
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_SHIFT 9
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved1 [08:08] */
++#define MISC1_UV_RX_ERROR_STATUS_reserved1_MASK 0x00000100
++#define MISC1_UV_RX_ERROR_STATUS_reserved1_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_reserved1_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_reserved1_SHIFT 8
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_DESC_TX_ABORT_ERRORS [07:07] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK 0x00000080
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_SHIFT 7
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved2 [06:05] */
++#define MISC1_UV_RX_ERROR_STATUS_reserved2_MASK 0x00000060
++#define MISC1_UV_RX_ERROR_STATUS_reserved2_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_reserved2_BITS 2
++#define MISC1_UV_RX_ERROR_STATUS_reserved2_SHIFT 5
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_FIFO_FULL_ERRORS [04:04] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK 0x00000010
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_SHIFT 4
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved3 [03:02] */
++#define MISC1_UV_RX_ERROR_STATUS_reserved3_MASK 0x0000000c
++#define MISC1_UV_RX_ERROR_STATUS_reserved3_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_reserved3_BITS 2
++#define MISC1_UV_RX_ERROR_STATUS_reserved3_SHIFT 2
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_FIFO_FULL_ERRORS [01:01] */
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK 0x00000002
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_SHIFT 1
++
++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved4 [00:00] */
++#define MISC1_UV_RX_ERROR_STATUS_reserved4_MASK 0x00000001
++#define MISC1_UV_RX_ERROR_STATUS_reserved4_ALIGN 0
++#define MISC1_UV_RX_ERROR_STATUS_reserved4_BITS 1
++#define MISC1_UV_RX_ERROR_STATUS_reserved4_SHIFT 0
++
++/****************************************************************************
++ * Datatype Definitions.
++ ***************************************************************************/
++#endif /* #ifndef MACFILE_H__ */
++
++/* End of File */
++
+diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
+new file mode 100644
+index 0000000..39c641d
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_cmds.c
+@@ -0,0 +1,1058 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_cmds.c
++ *
++ * Description:
++ * BCM70010 Linux driver user command interfaces.
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#include "crystalhd_cmds.h"
++#include "crystalhd_hw.h"
++
++static struct crystalhd_user *bc_cproc_get_uid(struct crystalhd_cmd *ctx)
++{
++ struct crystalhd_user *user = NULL;
++ int i;
++
++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) {
++ if (!ctx->user[i].in_use) {
++ user = &ctx->user[i];
++ break;
++ }
++ }
++
++ return user;
++}
++
++static int bc_cproc_get_user_count(struct crystalhd_cmd *ctx)
++{
++ int i, count = 0;
++
++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) {
++ if (ctx->user[i].in_use)
++ count++;
++ }
++
++ return count;
++}
++
++static void bc_cproc_mark_pwr_state(struct crystalhd_cmd *ctx)
++{
++ int i;
++
++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) {
++ if (!ctx->user[i].in_use)
++ continue;
++ if (ctx->user[i].mode == DTS_DIAG_MODE ||
++ ctx->user[i].mode == DTS_PLAYBACK_MODE) {
++ ctx->pwr_state_change = 1;
++ break;
++ }
++ }
++}
++
++static BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ int rc = 0, i = 0;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (ctx->user[idata->u_id].mode != DTS_MODE_INV) {
++ BCMLOG_ERR("Close the handle first..\n");
++ return BC_STS_ERR_USAGE;
++ }
++ if (idata->udata.u.NotifyMode.Mode == DTS_MONITOR_MODE) {
++ ctx->user[idata->u_id].mode = idata->udata.u.NotifyMode.Mode;
++ return BC_STS_SUCCESS;
++ }
++ if (ctx->state != BC_LINK_INVALID) {
++ BCMLOG_ERR("Link invalid state %d \n", ctx->state);
++ return BC_STS_ERR_USAGE;
++ }
++ /* Check for duplicate playback sessions..*/
++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) {
++ if (ctx->user[i].mode == DTS_DIAG_MODE ||
++ ctx->user[i].mode == DTS_PLAYBACK_MODE) {
++ BCMLOG_ERR("multiple playback sessions are not "
++ "supported..\n");
++ return BC_STS_ERR_USAGE;
++ }
++ }
++ ctx->cin_wait_exit = 0;
++ ctx->user[idata->u_id].mode = idata->udata.u.NotifyMode.Mode;
++ /* Setup mmap pool for uaddr sgl mapping..*/
++ rc = crystalhd_create_dio_pool(ctx->adp, BC_LINK_MAX_SGLS);
++ if (rc)
++ return BC_STS_ERROR;
++
++ /* Setup Hardware DMA rings */
++ return crystalhd_hw_setup_dma_rings(&ctx->hw_ctx);
++}
++
++static BC_STATUS bc_cproc_get_version(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++ idata->udata.u.VerInfo.DriverMajor = crystalhd_kmod_major;
++ idata->udata.u.VerInfo.DriverMinor = crystalhd_kmod_minor;
++ idata->udata.u.VerInfo.DriverRevision = crystalhd_kmod_rev;
++ return BC_STS_SUCCESS;
++}
++
++
++static BC_STATUS bc_cproc_get_hwtype(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
++{
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ crystalhd_pci_cfg_rd(ctx->adp, 0, 2,
++ (uint32_t *)&idata->udata.u.hwType.PciVenId);
++ crystalhd_pci_cfg_rd(ctx->adp, 2, 2,
++ (uint32_t *)&idata->udata.u.hwType.PciDevId);
++ crystalhd_pci_cfg_rd(ctx->adp, 8, 1,
++ (uint32_t *)&idata->udata.u.hwType.HwRev);
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_reg_rd(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ if (!ctx || !idata)
++ return BC_STS_INV_ARG;
++ idata->udata.u.regAcc.Value = bc_dec_reg_rd(ctx->adp,
++ idata->udata.u.regAcc.Offset);
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_reg_wr(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ if (!ctx || !idata)
++ return BC_STS_INV_ARG;
++
++ bc_dec_reg_wr(ctx->adp, idata->udata.u.regAcc.Offset,
++ idata->udata.u.regAcc.Value);
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_link_reg_rd(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ if (!ctx || !idata)
++ return BC_STS_INV_ARG;
++
++ idata->udata.u.regAcc.Value = crystalhd_reg_rd(ctx->adp,
++ idata->udata.u.regAcc.Offset);
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_link_reg_wr(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ if (!ctx || !idata)
++ return BC_STS_INV_ARG;
++
++ crystalhd_reg_wr(ctx->adp, idata->udata.u.regAcc.Offset,
++ idata->udata.u.regAcc.Value);
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_mem_rd(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!ctx || !idata || !idata->add_cdata)
++ return BC_STS_INV_ARG;
++
++ if (idata->udata.u.devMem.NumDwords > (idata->add_cdata_sz / 4)) {
++ BCMLOG_ERR("insufficient buffer\n");
++ return BC_STS_INV_ARG;
++ }
++ sts = crystalhd_mem_rd(ctx->adp, idata->udata.u.devMem.StartOff,
++ idata->udata.u.devMem.NumDwords,
++ (uint32_t *)idata->add_cdata);
++ return sts;
++
++}
++
++static BC_STATUS bc_cproc_mem_wr(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!ctx || !idata || !idata->add_cdata)
++ return BC_STS_INV_ARG;
++
++ if (idata->udata.u.devMem.NumDwords > (idata->add_cdata_sz / 4)) {
++ BCMLOG_ERR("insufficient buffer\n");
++ return BC_STS_INV_ARG;
++ }
++
++ sts = crystalhd_mem_wr(ctx->adp, idata->udata.u.devMem.StartOff,
++ idata->udata.u.devMem.NumDwords,
++ (uint32_t *)idata->add_cdata);
++ return sts;
++}
++
++static BC_STATUS bc_cproc_cfg_rd(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ uint32_t ix, cnt, off, len;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ uint32_t *temp;
++
++ if (!ctx || !idata)
++ return BC_STS_INV_ARG;
++
++ temp = (uint32_t *) idata->udata.u.pciCfg.pci_cfg_space;
++ off = idata->udata.u.pciCfg.Offset;
++ len = idata->udata.u.pciCfg.Size;
++
++ if (len <= 4)
++ return crystalhd_pci_cfg_rd(ctx->adp, off, len, temp);
++
++ /* Truncate to dword alignment..*/
++ len = 4;
++ cnt = idata->udata.u.pciCfg.Size / len;
++ for (ix = 0; ix < cnt; ix++) {
++ sts = crystalhd_pci_cfg_rd(ctx->adp, off, len, &temp[ix]);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("config read : %d\n", sts);
++ return sts;
++ }
++ off += len;
++ }
++
++ return sts;
++}
++
++static BC_STATUS bc_cproc_cfg_wr(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ uint32_t ix, cnt, off, len;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ uint32_t *temp;
++
++ if (!ctx || !idata)
++ return BC_STS_INV_ARG;
++
++ temp = (uint32_t *) idata->udata.u.pciCfg.pci_cfg_space;
++ off = idata->udata.u.pciCfg.Offset;
++ len = idata->udata.u.pciCfg.Size;
++
++ if (len <= 4)
++ return crystalhd_pci_cfg_wr(ctx->adp, off, len, temp[0]);
++
++ /* Truncate to dword alignment..*/
++ len = 4;
++ cnt = idata->udata.u.pciCfg.Size / len;
++ for (ix = 0; ix < cnt; ix++) {
++ sts = crystalhd_pci_cfg_wr(ctx->adp, off, len, temp[ix]);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("config write : %d\n", sts);
++ return sts;
++ }
++ off += len;
++ }
++
++ return sts;
++}
++
++static BC_STATUS bc_cproc_download_fw(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!ctx || !idata || !idata->add_cdata || !idata->add_cdata_sz) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (ctx->state != BC_LINK_INVALID) {
++ BCMLOG_ERR("Link invalid state %d \n", ctx->state);
++ return BC_STS_ERR_USAGE;
++ }
++
++ sts = crystalhd_download_fw(ctx->adp, (uint8_t *)idata->add_cdata,
++ idata->add_cdata_sz);
++
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("Firmware Download Failure!! - %d\n", sts);
++ } else
++ ctx->state |= BC_LINK_INIT;
++
++ return sts;
++}
++
++/*
++ * We use the FW_CMD interface to sync up playback state with application
++ * and firmware. This function will perform the required pre and post
++ * processing of the Firmware commands.
++ *
++ * Pause -
++ * Disable capture after decoder pause.
++ * Resume -
++ * First enable capture and issue decoder resume command.
++ * Flush -
++ * Abort pending input transfers and issue decoder flush command.
++ *
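++ * As used in the function below, cmd[0] carries the firmware opcode
++ * (e.g. eCMD_C011_DEC_CHAN_PAUSE) and cmd[3] carries its argument:
++ * the pause on/off flag or the flush type.
++ *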
++ */
++static BC_STATUS bc_cproc_do_fw_cmd(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
++{
++ BC_STATUS sts;
++ uint32_t *cmd;
++
++ if (!(ctx->state & BC_LINK_INIT)) {
++ BCMLOG_ERR("Link invalid state %d \n", ctx->state);
++ return BC_STS_ERR_USAGE;
++ }
++
++ cmd = idata->udata.u.fwCmd.cmd;
++
++ /* Pre-Process */
++ if (cmd[0] == eCMD_C011_DEC_CHAN_PAUSE) {
++ if (!cmd[3]) {
++ ctx->state &= ~BC_LINK_PAUSED;
++ crystalhd_hw_unpause(&ctx->hw_ctx);
++ }
++ } else if (cmd[0] == eCMD_C011_DEC_CHAN_FLUSH) {
++ BCMLOG(BCMLOG_INFO, "Flush issued\n");
++ if (cmd[3])
++ ctx->cin_wait_exit = 1;
++ }
++
++ sts = crystalhd_do_fw_cmd(&ctx->hw_ctx, &idata->udata.u.fwCmd);
++
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG(BCMLOG_INFO, "fw cmd %x failed\n", cmd[0]);
++ return sts;
++ }
++
++ /* Post-Process */
++ if (cmd[0] == eCMD_C011_DEC_CHAN_PAUSE) {
++ if (cmd[3]) {
++ ctx->state |= BC_LINK_PAUSED;
++ crystalhd_hw_pause(&ctx->hw_ctx);
++ }
++ }
++
++ return sts;
++}
++
++static void bc_proc_in_completion(crystalhd_dio_req *dio_hnd,
++ wait_queue_head_t *event, BC_STATUS sts)
++{
++ if (!dio_hnd || !event) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return;
++ }
++ if (sts == BC_STS_IO_USER_ABORT)
++ return;
++
++ dio_hnd->uinfo.comp_sts = sts;
++ dio_hnd->uinfo.ev_sts = 1;
++ crystalhd_set_event(event);
++}
++
++static BC_STATUS bc_cproc_codein_sleep(struct crystalhd_cmd *ctx)
++{
++ wait_queue_head_t sleep_ev;
++ int rc = 0;
++
++ if (ctx->state & BC_LINK_SUSPEND)
++ return BC_STS_IO_USER_ABORT;
++
++ if (ctx->cin_wait_exit) {
++ ctx->cin_wait_exit = 0;
++ return BC_STS_CMD_CANCELLED;
++ }
++ crystalhd_create_event(&sleep_ev);
++ crystalhd_wait_on_event(&sleep_ev, 0, 100, rc, 0);
++ if (rc == -EINTR)
++ return BC_STS_IO_USER_ABORT;
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata,
++ crystalhd_dio_req *dio)
++{
++ uint32_t tx_listid = 0;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ wait_queue_head_t event;
++ int rc = 0;
++
++ if (!ctx || !idata || !dio) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ crystalhd_create_event(&event);
++
++ ctx->tx_list_id = 0;
++ /* msleep_interruptible(2000); */
++ sts = crystalhd_hw_post_tx(&ctx->hw_ctx, dio, bc_proc_in_completion,
++ &event, &tx_listid,
++ idata->udata.u.ProcInput.Encrypted);
++
++ while (sts == BC_STS_BUSY) {
++ sts = bc_cproc_codein_sleep(ctx);
++ if (sts != BC_STS_SUCCESS)
++ break;
++ sts = crystalhd_hw_post_tx(&ctx->hw_ctx, dio,
++ bc_proc_in_completion,
++ &event, &tx_listid,
++ idata->udata.u.ProcInput.Encrypted);
++ }
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG(BCMLOG_DBG, "_hw_txdma returning sts:%d\n", sts);
++ return sts;
++ }
++ if (ctx->cin_wait_exit)
++ ctx->cin_wait_exit = 0;
++
++ ctx->tx_list_id = tx_listid;
++
++ /* _post() succeeded.. wait for the completion. */
++ crystalhd_wait_on_event(&event, (dio->uinfo.ev_sts), 3000, rc, 0);
++ ctx->tx_list_id = 0;
++ if (!rc) {
++ return dio->uinfo.comp_sts;
++ } else if (rc == -EBUSY) {
++ BCMLOG(BCMLOG_DBG, "_tx_post() T/O \n");
++ sts = BC_STS_TIMEOUT;
++ } else if (rc == -EINTR) {
++ BCMLOG(BCMLOG_DBG, "Tx Wait Signal int.\n");
++ sts = BC_STS_IO_USER_ABORT;
++ } else {
++ sts = BC_STS_IO_ERROR;
++ }
++
++ /* We are cancelling the IO from the same context as the _post().
++ * so no need to wait on the event again.. the return itself
++ * ensures the release of our resources.
++ */
++ crystalhd_hw_cancel_tx(&ctx->hw_ctx, tx_listid);
++
++ return sts;
++}
++
++/* Helper function to check on user buffers */
++static BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz,
++ uint32_t uv_off, bool en_422)
++{
++ if (!ubuff || !ub_sz) {
++ BCMLOG_ERR("%s->Invalid Arg %p %x\n",
++ ((pin) ? "TX" : "RX"), ubuff, ub_sz);
++ return BC_STS_INV_ARG;
++ }
++
++ /* Check for alignment */
++ if (((uintptr_t)ubuff) & 0x03) {
++ BCMLOG_ERR("%s-->Un-aligned address not implemented yet.. %p \n",
++ ((pin) ? "TX" : "RX"), ubuff);
++ return BC_STS_NOT_IMPL;
++ }
++ if (pin)
++ return BC_STS_SUCCESS;
++
++ if (!en_422 && !uv_off) {
++ BCMLOG_ERR("Need UV offset for 420 mode.\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (en_422 && uv_off) {
++ BCMLOG_ERR("UV offset in 422 mode ??\n");
++ return BC_STS_INV_ARG;
++ }
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_proc_input(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
++{
++ void *ubuff;
++ uint32_t ub_sz;
++ crystalhd_dio_req *dio_hnd = NULL;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ ubuff = idata->udata.u.ProcInput.pDmaBuff;
++ ub_sz = idata->udata.u.ProcInput.BuffSz;
++
++ sts = bc_cproc_check_inbuffs(1, ubuff, ub_sz, 0, 0);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++
++ sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, 0, 0, 1, &dio_hnd);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("dio map - %d \n", sts);
++ return sts;
++ }
++
++ if (!dio_hnd)
++ return BC_STS_ERROR;
++
++ sts = bc_cproc_hw_txdma(ctx, idata, dio_hnd);
++
++ crystalhd_unmap_dio(ctx->adp, dio_hnd);
++
++ return sts;
++}
++
++static BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ void *ubuff;
++ uint32_t ub_sz, uv_off;
++ bool en_422;
++ crystalhd_dio_req *dio_hnd = NULL;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ ubuff = idata->udata.u.RxBuffs.YuvBuff;
++ ub_sz = idata->udata.u.RxBuffs.YuvBuffSz;
++ uv_off = idata->udata.u.RxBuffs.UVbuffOffset;
++ en_422 = idata->udata.u.RxBuffs.b422Mode;
++
++ sts = bc_cproc_check_inbuffs(0, ubuff, ub_sz, uv_off, en_422);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++
++ sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, uv_off,
++ en_422, 0, &dio_hnd);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("dio map - %d \n", sts);
++ return sts;
++ }
++
++ if (!dio_hnd)
++ return BC_STS_ERROR;
++
++ sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio_hnd, (ctx->state == BC_LINK_READY));
++ if ((sts != BC_STS_SUCCESS) && (sts != BC_STS_BUSY)) {
++ crystalhd_unmap_dio(ctx->adp, dio_hnd);
++ return sts;
++ }
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_fmt_change(struct crystalhd_cmd *ctx,
++ crystalhd_dio_req *dio)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio, 0);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++
++ ctx->state |= BC_LINK_FMT_CHG;
++ if (ctx->state == BC_LINK_READY)
++ sts = crystalhd_hw_start_capture(&ctx->hw_ctx);
++
++ return sts;
++}
++
++static BC_STATUS bc_cproc_fetch_frame(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ crystalhd_dio_req *dio = NULL;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ BC_DEC_OUT_BUFF *frame;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (!(ctx->state & BC_LINK_CAP_EN)) {
++ BCMLOG(BCMLOG_DBG, "Capture not enabled..%x\n", ctx->state);
++ return BC_STS_ERR_USAGE;
++ }
++
++ frame = &idata->udata.u.DecOutData;
++
++ sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio);
++ if (sts != BC_STS_SUCCESS)
++ return (ctx->state & BC_LINK_SUSPEND) ? BC_STS_IO_USER_ABORT : sts;
++
++ frame->Flags = dio->uinfo.comp_flags;
++
++ if (frame->Flags & COMP_FLAG_FMT_CHANGE)
++ return bc_cproc_fmt_change(ctx, dio);
++
++ frame->OutPutBuffs.YuvBuff = dio->uinfo.xfr_buff;
++ frame->OutPutBuffs.YuvBuffSz = dio->uinfo.xfr_len;
++ frame->OutPutBuffs.UVbuffOffset = dio->uinfo.uv_offset;
++ frame->OutPutBuffs.b422Mode = dio->uinfo.b422mode;
++
++ frame->OutPutBuffs.YBuffDoneSz = dio->uinfo.y_done_sz;
++ frame->OutPutBuffs.UVBuffDoneSz = dio->uinfo.uv_done_sz;
++
++ crystalhd_unmap_dio(ctx->adp, dio);
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_start_capture(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ ctx->state |= BC_LINK_CAP_EN;
++ if (ctx->state == BC_LINK_READY)
++ return crystalhd_hw_start_capture(&ctx->hw_ctx);
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_flush_cap_buffs(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ crystalhd_dio_req *dio = NULL;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ BC_DEC_OUT_BUFF *frame;
++ uint32_t count;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (!(ctx->state & BC_LINK_CAP_EN))
++ return BC_STS_ERR_USAGE;
++
++ /* We should ack flush even when we are in paused/suspend state */
++ if (!(ctx->state & BC_LINK_READY))
++ return crystalhd_hw_stop_capture(&ctx->hw_ctx);
++
++ ctx->state &= ~(BC_LINK_CAP_EN|BC_LINK_FMT_CHG);
++
++ frame = &idata->udata.u.DecOutData;
++ for (count = 0; count < BC_RX_LIST_CNT; count++) {
++
++ sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio);
++ if (sts != BC_STS_SUCCESS)
++ break;
++
++ crystalhd_unmap_dio(ctx->adp, dio);
++ }
++
++ return crystalhd_hw_stop_capture(&ctx->hw_ctx);
++}
++
++static BC_STATUS bc_cproc_get_stats(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ BC_DTS_STATS *stats;
++ struct crystalhd_hw_stats hw_stats;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ crystalhd_hw_stats(&ctx->hw_ctx, &hw_stats);
++
++ stats = &idata->udata.u.drvStat;
++ stats->drvRLL = hw_stats.rdyq_count;
++ stats->drvFLL = hw_stats.freeq_count;
++ stats->DrvTotalFrmDropped = hw_stats.rx_errors;
++ stats->DrvTotalHWErrs = hw_stats.rx_errors + hw_stats.tx_errors;
++ stats->intCount = hw_stats.num_interrupts;
++ stats->DrvIgnIntrCnt = hw_stats.num_interrupts -
++ hw_stats.dev_interrupts;
++ stats->TxFifoBsyCnt = hw_stats.cin_busy;
++ stats->pauseCount = hw_stats.pause_cnt;
++
++ if (ctx->pwr_state_change)
++ stats->pwr_state_change = 1;
++ if (ctx->state & BC_LINK_PAUSED)
++ stats->DrvPauseTime = 1;
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_reset_stats(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ crystalhd_hw_stats(&ctx->hw_ctx, NULL);
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS bc_cproc_chg_clk(struct crystalhd_cmd *ctx,
++ crystalhd_ioctl_data *idata)
++{
++ BC_CLOCK *clock;
++ uint32_t oldClk;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ clock = &idata->udata.u.clockValue;
++ oldClk = ctx->hw_ctx.core_clock_mhz;
++ ctx->hw_ctx.core_clock_mhz = clock->clk;
++
++ if (ctx->state & BC_LINK_READY) {
++ sts = crystalhd_hw_set_core_clock(&ctx->hw_ctx);
++ if (sts == BC_STS_CLK_NOCHG)
++ ctx->hw_ctx.core_clock_mhz = oldClk;
++ }
++
++ clock->clk = ctx->hw_ctx.core_clock_mhz;
++
++ return sts;
++}
++
++/*=============== Cmd Proc Table.. ======================================*/
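++/* The third field (block_mon) marks commands that are refused for handles
++ * opened in DTS_MONITOR_MODE; see crystalhd_get_cmd_proc().
++ */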
++static const crystalhd_cmd_tbl_t g_crystalhd_cproc_tbl[] = {
++ { BCM_IOC_GET_VERSION, bc_cproc_get_version, 0},
++ { BCM_IOC_GET_HWTYPE, bc_cproc_get_hwtype, 0},
++ { BCM_IOC_REG_RD, bc_cproc_reg_rd, 0},
++ { BCM_IOC_REG_WR, bc_cproc_reg_wr, 0},
++ { BCM_IOC_FPGA_RD, bc_cproc_link_reg_rd, 0},
++ { BCM_IOC_FPGA_WR, bc_cproc_link_reg_wr, 0},
++ { BCM_IOC_MEM_RD, bc_cproc_mem_rd, 0},
++ { BCM_IOC_MEM_WR, bc_cproc_mem_wr, 0},
++ { BCM_IOC_RD_PCI_CFG, bc_cproc_cfg_rd, 0},
++ { BCM_IOC_WR_PCI_CFG, bc_cproc_cfg_wr, 1},
++ { BCM_IOC_FW_DOWNLOAD, bc_cproc_download_fw, 1},
++ { BCM_IOC_FW_CMD, bc_cproc_do_fw_cmd, 1},
++ { BCM_IOC_PROC_INPUT, bc_cproc_proc_input, 1},
++ { BCM_IOC_ADD_RXBUFFS, bc_cproc_add_cap_buff, 1},
++ { BCM_IOC_FETCH_RXBUFF, bc_cproc_fetch_frame, 1},
++ { BCM_IOC_START_RX_CAP, bc_cproc_start_capture, 1},
++ { BCM_IOC_FLUSH_RX_CAP, bc_cproc_flush_cap_buffs, 1},
++ { BCM_IOC_GET_DRV_STAT, bc_cproc_get_stats, 0},
++ { BCM_IOC_RST_DRV_STAT, bc_cproc_reset_stats, 0},
++ { BCM_IOC_NOTIFY_MODE, bc_cproc_notify_mode, 0},
++ { BCM_IOC_CHG_CLK, bc_cproc_chg_clk, 0},
++ { BCM_IOC_END, NULL},
++};
++
++/*=============== Cmd Proc Functions.. ===================================*/
++
++/**
++ * crystalhd_suspend - Power management suspend request.
++ * @ctx: Command layer context.
++ * @idata: Iodata - required for internal use.
++ *
++ * Return:
++ * status
++ *
++ * 1. Set the state to Suspend.
++ * 2. Flush the Rx buffers; this will unmap all the buffers and
++ *    stop the RxDMA engine.
++ * 3. Cancel the TX IO and stop the DMA engine.
++ * 4. Put the DDR into deep sleep.
++ * 5. Stop the hardware, putting it into the reset state.
++ *
++ * The current gstreamer framework does not provide any power-management
++ * notification to the user-mode decoder plug-in. As a work-around we pass
++ * the power-management notification on to our plug-in by completing all
++ * outstanding requests with the BC_STS_IO_USER_ABORT return code.
++ */
++BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!ctx || !idata) {
++ BCMLOG_ERR("Invalid Parameters\n");
++ return BC_STS_ERROR;
++ }
++
++ if (ctx->state & BC_LINK_SUSPEND)
++ return BC_STS_SUCCESS;
++
++ if (ctx->state == BC_LINK_INVALID) {
++ BCMLOG(BCMLOG_DBG, "Nothing To Do Suspend Success\n");
++ return BC_STS_SUCCESS;
++ }
++
++ ctx->state |= BC_LINK_SUSPEND;
++
++ bc_cproc_mark_pwr_state(ctx);
++
++ if (ctx->state & BC_LINK_CAP_EN) {
++ sts = bc_cproc_flush_cap_buffs(ctx, idata);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++ }
++
++ if (ctx->tx_list_id) {
++ sts = crystalhd_hw_cancel_tx(&ctx->hw_ctx, ctx->tx_list_id);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++ }
++
++ sts = crystalhd_hw_suspend(&ctx->hw_ctx);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++
++ BCMLOG(BCMLOG_DBG, "BCM70012 suspend success\n");
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_resume - Resume frame capture.
++ * @ctx: Command layer context.
++ *
++ * Return:
++ * status
++ *
++ *
++ * Resume frame capture.
++ *
++ * PM_Resume cannot restore the playback state to its pre-suspend state
++ * because we do not keep any clip-related information within the driver.
++ * To get back to the pre-suspend state, the application re-opens the
++ * device and starts a new playback session from the pre-suspend clip position.
++ *
++ */
++BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx)
++{
++ BCMLOG(BCMLOG_DBG, "crystalhd_resume Success %x\n", ctx->state);
++
++ bc_cproc_mark_pwr_state(ctx);
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_user_open - Create application handle.
++ * @ctx: Command layer context.
++ * @user_ctx: User ID context.
++ *
++ * Return:
++ * status
++ *
++ * Creates an application-specific UID and allocates
++ * application-specific resources. HW layer initialization
++ * is done for the first open request.
++ */
++BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx,
++ struct crystalhd_user **user_ctx)
++{
++ struct crystalhd_user *uc;
++
++ if (!ctx || !user_ctx) {
++ BCMLOG_ERR("Invalid arg..\n");
++ return BC_STS_INV_ARG;
++ }
++
++ uc = bc_cproc_get_uid(ctx);
++ if (!uc) {
++ BCMLOG(BCMLOG_INFO, "No free user context...\n");
++ return BC_STS_BUSY;
++ }
++
++ BCMLOG(BCMLOG_INFO, "Opening new user[%x] handle\n", uc->uid);
++
++ crystalhd_hw_open(&ctx->hw_ctx, ctx->adp);
++
++ uc->in_use = 1;
++
++ *user_ctx = uc;
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_user_close - Close application handle.
++ * @ctx: Command layer context.
++ * @uc: User ID context.
++ *
++ * Return:
++ * status
++ *
++ * Close the application handle and release application-specific
++ * resources.
++ */
++BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc)
++{
++ uint32_t mode = uc->mode;
++
++ ctx->user[uc->uid].mode = DTS_MODE_INV;
++ ctx->user[uc->uid].in_use = 0;
++ ctx->cin_wait_exit = 1;
++ ctx->pwr_state_change = 0;
++
++ BCMLOG(BCMLOG_INFO, "Closing user[%x] handle\n", uc->uid);
++
++ if ((mode == DTS_DIAG_MODE) || (mode == DTS_PLAYBACK_MODE)) {
++ crystalhd_hw_free_dma_rings(&ctx->hw_ctx);
++ crystalhd_destroy_dio_pool(ctx->adp);
++ } else if (bc_cproc_get_user_count(ctx)) {
++ return BC_STS_SUCCESS;
++ }
++
++ crystalhd_hw_close(&ctx->hw_ctx);
++
++ ctx->state = BC_LINK_INVALID;
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_setup_cmd_context - Setup Command layer resources.
++ * @ctx: Command layer context.
++ * @adp: Adapter context
++ *
++ * Return:
++ * status
++ *
++ * Called at the time of driver load.
++ */
++BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx,
++ struct crystalhd_adp *adp)
++{
++ int i = 0;
++
++ if (!ctx || !adp) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (ctx->adp)
++ BCMLOG(BCMLOG_DBG, "Resetting Cmd context delete missing..\n");
++
++ ctx->adp = adp;
++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) {
++ ctx->user[i].uid = i;
++ ctx->user[i].in_use = 0;
++ ctx->user[i].mode = DTS_MODE_INV;
++ }
++
++ /* Open and close the hardware to put it into the sleep state */
++ crystalhd_hw_open(&ctx->hw_ctx, ctx->adp);
++ crystalhd_hw_close(&ctx->hw_ctx);
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_delete_cmd_context - Release Command layer resources.
++ * @ctx: Command layer context.
++ *
++ * Return:
++ * status
++ *
++ * Called at the time of driver un-load.
++ */
++BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx)
++{
++ BCMLOG(BCMLOG_DBG, "Deleting Command context..\n");
++
++ ctx->adp = NULL;
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_get_cmd_proc - Cproc table lookup.
++ * @ctx: Command layer context.
++ * @cmd: IOCTL command code.
++ * @uc: User ID context.
++ *
++ * Return:
++ * command proc function pointer
++ *
++ * This function checks the process context, application's
++ * mode of operation and returns the function pointer
++ * from the cproc table.
++ */
++crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd,
++ struct crystalhd_user *uc)
++{
++ crystalhd_cmd_proc cproc = NULL;
++ unsigned int i, tbl_sz;
++
++ if (!ctx) {
++ BCMLOG_ERR("Invalid arg.. Cmd[%d]\n", cmd);
++ return NULL;
++ }
++
++ if ((cmd != BCM_IOC_GET_DRV_STAT) && (ctx->state & BC_LINK_SUSPEND)) {
++ BCMLOG_ERR("Invalid State [suspend Set].. Cmd[%d]\n", cmd);
++ return NULL;
++ }
++
++ tbl_sz = sizeof(g_crystalhd_cproc_tbl) / sizeof(crystalhd_cmd_tbl_t);
++ for (i = 0; i < tbl_sz; i++) {
++ if (g_crystalhd_cproc_tbl[i].cmd_id == cmd) {
++ if ((uc->mode == DTS_MONITOR_MODE) &&
++ (g_crystalhd_cproc_tbl[i].block_mon)) {
++ BCMLOG(BCMLOG_INFO, "Blocking cmd %d \n", cmd);
++ break;
++ }
++ cproc = g_crystalhd_cproc_tbl[i].cmd_proc;
++ break;
++ }
++ }
++
++ return cproc;
++}
++
++/**
++ * crystalhd_cmd_interrupt - ISR entry point
++ * @ctx: Command layer context.
++ *
++ * Return:
++ * TRUE: If interrupt from bcm70012 device.
++ *
++ *
++ * ISR entry point from OS layer.
++ */
++bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx)
++{
++ if (!ctx) {
++ BCMLOG_ERR("Invalid arg..\n");
++ return 0;
++ }
++
++ return crystalhd_hw_interrupt(ctx->adp, &ctx->hw_ctx);
++}
+diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
+new file mode 100644
+index 0000000..6b290ae
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_cmds.h
+@@ -0,0 +1,88 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_cmds.h
++ *
++ * Description:
++ * BCM70010 Linux driver user command interfaces.
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#ifndef _CRYSTALHD_CMDS_H_
++#define _CRYSTALHD_CMDS_H_
++
++/*
++ * NOTE: This is the main interface file between the Linux layer
++ * and the hardware layer. This file will use the definitions
++ * from _dts_glob and dts_defs etc., which are defined for
++ * Windows.
++ */
++#include "crystalhd_misc.h"
++#include "crystalhd_hw.h"
++
++enum _crystalhd_state{
++ BC_LINK_INVALID = 0x00,
++ BC_LINK_INIT = 0x01,
++ BC_LINK_CAP_EN = 0x02,
++ BC_LINK_FMT_CHG = 0x04,
++ BC_LINK_SUSPEND = 0x10,
++ BC_LINK_PAUSED = 0x20,
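++ /* Fully-operational state: init, capture-enable and format-change all set. */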
++ BC_LINK_READY = (BC_LINK_INIT | BC_LINK_CAP_EN | BC_LINK_FMT_CHG),
++};
++
++struct crystalhd_user {
++ uint32_t uid;
++ uint32_t in_use;
++ uint32_t mode;
++};
++
++#define DTS_MODE_INV (-1)
++
++struct crystalhd_cmd {
++ uint32_t state;
++ struct crystalhd_adp *adp;
++ struct crystalhd_user user[BC_LINK_MAX_OPENS];
++
++ spinlock_t ctx_lock;
++ uint32_t tx_list_id;
++ uint32_t cin_wait_exit;
++ uint32_t pwr_state_change;
++ struct crystalhd_hw hw_ctx;
++};
++
++typedef BC_STATUS (*crystalhd_cmd_proc)(struct crystalhd_cmd *, crystalhd_ioctl_data *);
++
++typedef struct _crystalhd_cmd_tbl {
++ uint32_t cmd_id;
++ const crystalhd_cmd_proc cmd_proc;
++ uint32_t block_mon;
++} crystalhd_cmd_tbl_t;
++
++
++BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata);
++BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx);
++crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd,
++ struct crystalhd_user *uc);
++BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx, struct crystalhd_user **user_ctx);
++BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc);
++BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx, struct crystalhd_adp *adp);
++BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx);
++bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx);
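++
++/*
++ * Typical call sequence (a sketch only; the actual ioctl plumbing lives
++ * in the OS glue layer and is not shown in this header):
++ *
++ *	crystalhd_setup_cmd_context(&cmd, adp);           (driver load)
++ *	crystalhd_user_open(&cmd, &uc);                   (per-application handle)
++ *	cproc = crystalhd_get_cmd_proc(&cmd, cmd_id, uc);
++ *	if (cproc)
++ *		sts = cproc(&cmd, idata);                 (dispatch one command)
++ *	crystalhd_user_close(&cmd, uc);
++ *	crystalhd_delete_cmd_context(&cmd);               (driver unload)
++ */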
++
++#endif
+diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h
+new file mode 100644
+index 0000000..261cd19
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_fw_if.h
+@@ -0,0 +1,369 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_fw_if.h
++ *
++ * Description:
++ * BCM70012 Firmware interface definitions.
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#ifndef _CRYSTALHD_FW_IF_H_
++#define _CRYSTALHD_FW_IF_H_
++
++/* TBD: Pull in only required defs into this file.. */
++
++
++
++/* User Data Header */
++typedef struct user_data {
++ struct user_data *next;
++ uint32_t type;
++ uint32_t size;
++} UD_HDR;
++
++
++
++/*------------------------------------------------------*
++ * MPEG Extension to the PPB *
++ *------------------------------------------------------*/
++typedef struct {
++ uint32_t to_be_defined;
++ uint32_t valid;
++
++ /* Always valid, defaults to picture size if no
++ sequence display extension in the stream. */
++ uint32_t display_horizontal_size;
++ uint32_t display_vertical_size;
++
++ /* MPEG_VALID_PANSCAN
++ Offsets are copied from the MPEG stream. */
++ uint32_t offset_count;
++ int32_t horizontal_offset[3];
++ int32_t vertical_offset[3];
++
++ /* MPEG_VALID_USERDATA
++ User data is in the form of a linked list. */
++ int32_t userDataSize;
++ UD_HDR *userData;
++
++} PPB_MPEG;
++
++
++/*------------------------------------------------------*
++ * VC1 Extension to the PPB *
++ *------------------------------------------------------*/
++typedef struct {
++ uint32_t to_be_defined;
++ uint32_t valid;
++
++ /* Always valid, defaults to picture size if no
++ sequence display extension in the stream. */
++ uint32_t display_horizontal_size;
++ uint32_t display_vertical_size;
++
++ /* VC1 pan scan windows */
++ uint32_t num_panscan_windows;
++ int32_t ps_horiz_offset[4];
++ int32_t ps_vert_offset[4];
++ int32_t ps_width[4];
++ int32_t ps_height[4];
++
++ /* VC1_VALID_USERDATA
++ User data is in the form of a linked list. */
++ int32_t userDataSize;
++ UD_HDR *userData;
++
++} PPB_VC1;
++
++/*------------------------------------------------------*
++ * H.264 Extension to the PPB *
++ *------------------------------------------------------*/
++
++/**
++ * @brief Film grain SEI message.
++ *
++ * Content of the film grain SEI message.
++ */
++
++/* maximum number of model-values per the Thomson spec (the standard says 5) */
++#define MAX_FGT_MODEL_VALUE (3)
++
++/* maximum number of intervals (as many as 256 intervals?) */
++#define MAX_FGT_VALUE_INTERVAL (256)
++
++typedef struct FGT_SEI {
++ struct FGT_SEI *next;
++ unsigned char model_values[3][MAX_FGT_VALUE_INTERVAL][MAX_FGT_MODEL_VALUE];
++ unsigned char upper_bound[3][MAX_FGT_VALUE_INTERVAL];
++ unsigned char lower_bound[3][MAX_FGT_VALUE_INTERVAL];
++
++ unsigned char cancel_flag; /* Cancel flag: 1 no film grain. */
++ unsigned char model_id; /* Model id. */
++
++ /* +unused SE based on Thomson spec */
++ unsigned char color_desc_flag; /* Separate color description flag. */
++ unsigned char bit_depth_luma; /* Bit depth luma minus 8. */
++ unsigned char bit_depth_chroma; /* Bit depth chroma minus 8. */
++ unsigned char full_range_flag; /* Full range flag. */
++ unsigned char color_primaries; /* Color primaries. */
++ unsigned char transfer_charact; /* Transfer characteristics. */
++ unsigned char matrix_coeff; /* Matrix coefficients. */
++ /* -unused SE based on Thomson spec */
++
++ unsigned char blending_mode_id; /* Blending mode. */
++ unsigned char log2_scale_factor; /* Log2 scale factor (2-7). */
++ unsigned char comp_flag[3]; /* Components [0,2] parameters present flag. */
++ unsigned char num_intervals_minus1[3]; /* Number of intensity level intervals. */
++ unsigned char num_model_values[3]; /* Number of model values. */
++ uint16_t repetition_period; /* Repetition period (0-16384) */
++
++} FGT_SEI;
++
++typedef struct {
++ /* 'valid' specifies which fields (or sets of
++ * fields) below are valid. If the corresponding
++ * bit in 'valid' is NOT set then that field(s)
++ * is (are) not initialized. */
++ uint32_t valid;
++
++ int32_t poc_top; /* POC for Top Field/Frame */
++ int32_t poc_bottom; /* POC for Bottom Field */
++ uint32_t idr_pic_id;
++
++ /* H264_VALID_PANSCAN */
++ uint32_t pan_scan_count;
++ int32_t pan_scan_left[3];
++ int32_t pan_scan_right[3];
++ int32_t pan_scan_top[3];
++ int32_t pan_scan_bottom[3];
++
++ /* H264_VALID_CT_TYPE */
++ uint32_t ct_type_count;
++ uint32_t ct_type[3];
++
++ /* H264_VALID_SPS_CROP */
++ int32_t sps_crop_left;
++ int32_t sps_crop_right;
++ int32_t sps_crop_top;
++ int32_t sps_crop_bottom;
++
++ /* H264_VALID_VUI */
++ uint32_t chroma_top;
++ uint32_t chroma_bottom;
++
++ /* H264_VALID_USER */
++ uint32_t user_data_size;
++ UD_HDR *user_data;
++
++ /* H264 VALID FGT */
++ FGT_SEI *pfgt;
++
++} PPB_H264;
++
++typedef struct {
++ /* Common fields. */
++ uint32_t picture_number; /* Ordinal display number */
++ uint32_t video_buffer; /* Video (picbuf) number */
++ uint32_t video_address; /* Address of picbuf Y */
++ uint32_t video_address_uv; /* Address of picbuf UV */
++ uint32_t video_stripe; /* Picbuf stripe */
++ uint32_t video_width; /* Picbuf width */
++ uint32_t video_height; /* Picbuf height */
++
++ uint32_t channel_id; /* Decoder channel ID */
++ uint32_t status; /* reserved */
++ uint32_t width; /* pixels */
++ uint32_t height; /* pixels */
++ uint32_t chroma_format; /* see above */
++ uint32_t pulldown; /* see above */
++ uint32_t flags; /* see above */
++ uint32_t pts; /* 32 LSBs of PTS */
++ uint32_t protocol; /* protocolXXX (above) */
++
++ uint32_t frame_rate; /* see above */
++ uint32_t matrix_coeff; /* see above */
++ uint32_t aspect_ratio; /* see above */
++ uint32_t colour_primaries; /* see above */
++ uint32_t transfer_char; /* see above */
++ uint32_t pcr_offset; /* 45kHz if PCR type; else 27MHz */
++ uint32_t n_drop; /* Number of pictures to be dropped */
++
++ uint32_t custom_aspect_ratio_width_height;
++ /* upper 16-bits is Y and lower 16-bits is X */
++
++ uint32_t picture_tag; /* Indexing tag from BUD packets */
++ uint32_t picture_done_payload;
++ uint32_t picture_meta_payload;
++ uint32_t reserved[1];
++
++ /* Protocol-specific extensions. */
++ union {
++ PPB_H264 h264;
++ PPB_MPEG mpeg;
++ PPB_VC1 vc1;
++ } other;
++
++} PPB;
++
++typedef struct {
++ uint32_t bFormatChange;
++ uint32_t resolution;
++ uint32_t channelId;
++ uint32_t ppbPtr;
++ int32_t ptsStcOffset;
++ uint32_t zeroPanscanValid;
++ uint32_t dramOutBufAddr;
++ uint32_t yComponent;
++ PPB ppb;
++
++} C011_PIB;
++
++
++
++typedef struct {
++ uint32_t command;
++ uint32_t sequence;
++ uint32_t status;
++ uint32_t picBuf;
++ uint32_t picRelBuf;
++ uint32_t picInfoDeliveryQ;
++ uint32_t picInfoReleaseQ;
++ uint32_t channelStatus;
++ uint32_t userDataDeliveryQ;
++ uint32_t userDataReleaseQ;
++ uint32_t transportStreamCaptureAddr;
++ uint32_t asyncEventQ;
++
++} DecRspChannelStartVideo;
++
++#define eCMD_C011_CMD_BASE (0x73763000)
++
++/* host commands */
++typedef enum {
++ eCMD_TS_GET_NEXT_PIC = 0x7376F100, /* debug get next picture */
++ eCMD_TS_GET_LAST_PIC = 0x7376F102, /* debug get last pic status */
++ eCMD_TS_READ_WRITE_MEM = 0x7376F104, /* debug read write memory */
++
++ /* New API commands */
++ /* General commands */
++ eCMD_C011_INIT = eCMD_C011_CMD_BASE + 0x01,
++ eCMD_C011_RESET = eCMD_C011_CMD_BASE + 0x02,
++ eCMD_C011_SELF_TEST = eCMD_C011_CMD_BASE + 0x03,
++ eCMD_C011_GET_VERSION = eCMD_C011_CMD_BASE + 0x04,
++ eCMD_C011_GPIO = eCMD_C011_CMD_BASE + 0x05,
++ eCMD_C011_DEBUG_SETUP = eCMD_C011_CMD_BASE + 0x06,
++
++ /* Decoding commands */
++ eCMD_C011_DEC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x100,
++ eCMD_C011_DEC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x101,
++ eCMD_C011_DEC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x102,
++ eCMD_C011_DEC_CHAN_STATUS = eCMD_C011_CMD_BASE + 0x103,
++ eCMD_C011_DEC_CHAN_FLUSH = eCMD_C011_CMD_BASE + 0x104,
++ eCMD_C011_DEC_CHAN_TRICK_PLAY = eCMD_C011_CMD_BASE + 0x105,
++ eCMD_C011_DEC_CHAN_TS_PIDS = eCMD_C011_CMD_BASE + 0x106,
++ eCMD_C011_DEC_CHAN_PS_STREAM_ID = eCMD_C011_CMD_BASE + 0x107,
++ eCMD_C011_DEC_CHAN_INPUT_PARAMS = eCMD_C011_CMD_BASE + 0x108,
++ eCMD_C011_DEC_CHAN_VIDEO_OUTPUT = eCMD_C011_CMD_BASE + 0x109,
++ eCMD_C011_DEC_CHAN_OUTPUT_FORMAT = eCMD_C011_CMD_BASE + 0x10A,
++ eCMD_C011_DEC_CHAN_SCALING_FILTERS = eCMD_C011_CMD_BASE + 0x10B,
++ eCMD_C011_DEC_CHAN_OSD_MODE = eCMD_C011_CMD_BASE + 0x10D,
++ eCMD_C011_DEC_CHAN_DROP = eCMD_C011_CMD_BASE + 0x10E,
++ eCMD_C011_DEC_CHAN_RELEASE = eCMD_C011_CMD_BASE + 0x10F,
++ eCMD_C011_DEC_CHAN_STREAM_SETTINGS = eCMD_C011_CMD_BASE + 0x110,
++ eCMD_C011_DEC_CHAN_PAUSE_OUTPUT = eCMD_C011_CMD_BASE + 0x111,
++ eCMD_C011_DEC_CHAN_CHANGE = eCMD_C011_CMD_BASE + 0x112,
++ eCMD_C011_DEC_CHAN_SET_STC = eCMD_C011_CMD_BASE + 0x113,
++ eCMD_C011_DEC_CHAN_SET_PTS = eCMD_C011_CMD_BASE + 0x114,
++ eCMD_C011_DEC_CHAN_CC_MODE = eCMD_C011_CMD_BASE + 0x115,
++ eCMD_C011_DEC_CREATE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x116,
++ eCMD_C011_DEC_COPY_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x117,
++ eCMD_C011_DEC_DELETE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x118,
++ eCMD_C011_DEC_CHAN_SET_DECYPTION = eCMD_C011_CMD_BASE + 0x119,
++ eCMD_C011_DEC_CHAN_START_VIDEO = eCMD_C011_CMD_BASE + 0x11A,
++ eCMD_C011_DEC_CHAN_STOP_VIDEO = eCMD_C011_CMD_BASE + 0x11B,
++ eCMD_C011_DEC_CHAN_PIC_CAPTURE = eCMD_C011_CMD_BASE + 0x11C,
++ eCMD_C011_DEC_CHAN_PAUSE = eCMD_C011_CMD_BASE + 0x11D,
++ eCMD_C011_DEC_CHAN_PAUSE_STATE = eCMD_C011_CMD_BASE + 0x11E,
++ eCMD_C011_DEC_CHAN_SET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x11F,
++ eCMD_C011_DEC_CHAN_GET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x120,
++ eCMD_C011_DEC_CHAN_SET_FF_RATE = eCMD_C011_CMD_BASE + 0x121,
++ eCMD_C011_DEC_CHAN_GET_FF_RATE = eCMD_C011_CMD_BASE + 0x122,
++ eCMD_C011_DEC_CHAN_FRAME_ADVANCE = eCMD_C011_CMD_BASE + 0x123,
++ eCMD_C011_DEC_CHAN_SET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x124,
++ eCMD_C011_DEC_CHAN_GET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x125,
++ eCMD_C011_DEC_CHAN_FILL_PIC_BUF = eCMD_C011_CMD_BASE + 0x126,
++ eCMD_C011_DEC_CHAN_SET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x127,
++ eCMD_C011_DEC_CHAN_GET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x128,
++ eCMD_C011_DEC_CHAN_SET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x129,
++ eCMD_C011_DEC_CHAN_GET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x12A,
++ eCMD_C011_DEC_CHAN_REVERSE_FIELD_STATUS = eCMD_C011_CMD_BASE + 0x12B,
++ eCMD_C011_DEC_CHAN_I_PICTURE_FOUND = eCMD_C011_CMD_BASE + 0x12C,
++ eCMD_C011_DEC_CHAN_SET_PARAMETER = eCMD_C011_CMD_BASE + 0x12D,
++ eCMD_C011_DEC_CHAN_SET_USER_DATA_MODE = eCMD_C011_CMD_BASE + 0x12E,
++ eCMD_C011_DEC_CHAN_SET_PAUSE_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x12F,
++ eCMD_C011_DEC_CHAN_SET_SLOW_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x130,
++ eCMD_C011_DEC_CHAN_SET_FF_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x131,
++ eCMD_C011_DEC_CHAN_SET_DISPLAY_TIMING_MODE = eCMD_C011_CMD_BASE + 0x132,
++ eCMD_C011_DEC_CHAN_SET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x133,
++ eCMD_C011_DEC_CHAN_GET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x134,
++ eCMD_C011_DEC_CHAN_SET_REVERSE_FIELD = eCMD_C011_CMD_BASE + 0x135,
++ eCMD_C011_DEC_CHAN_STREAM_OPEN = eCMD_C011_CMD_BASE + 0x136,
++ eCMD_C011_DEC_CHAN_SET_PCR_PID = eCMD_C011_CMD_BASE + 0x137,
++ eCMD_C011_DEC_CHAN_SET_VID_PID = eCMD_C011_CMD_BASE + 0x138,
++ eCMD_C011_DEC_CHAN_SET_PAN_SCAN_MODE = eCMD_C011_CMD_BASE + 0x139,
++ eCMD_C011_DEC_CHAN_START_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x140,
++ eCMD_C011_DEC_CHAN_STOP_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x141,
++ eCMD_C011_DEC_CHAN_SET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x142,
++ eCMD_C011_DEC_CHAN_GET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x143,
++ eCMD_C011_DEC_CHAN_SET_HOST_TRICK_MODE = eCMD_C011_CMD_BASE + 0x144,
++ eCMD_C011_DEC_CHAN_SET_OPERATION_MODE = eCMD_C011_CMD_BASE + 0x145,
++ eCMD_C011_DEC_CHAN_DISPLAY_PAUSE_UNTO_PTS = eCMD_C011_CMD_BASE + 0x146,
++ eCMD_C011_DEC_CHAN_SET_PTS_STC_DIFF_THRESHOLD = eCMD_C011_CMD_BASE + 0x147,
++ eCMD_C011_DEC_CHAN_SEND_COMPRESSED_BUF = eCMD_C011_CMD_BASE + 0x148,
++ eCMD_C011_DEC_CHAN_SET_CLIPPING = eCMD_C011_CMD_BASE + 0x149,
++ eCMD_C011_DEC_CHAN_SET_PARAMETERS_FOR_HARD_RESET_INTERRUPT_TO_HOST
++ = eCMD_C011_CMD_BASE + 0x150,
++
++ /* Decoder RevD commands */
++ eCMD_C011_DEC_CHAN_SET_CSC = eCMD_C011_CMD_BASE + 0x180, /* color space conversion */
++ eCMD_C011_DEC_CHAN_SET_RANGE_REMAP = eCMD_C011_CMD_BASE + 0x181,
++ eCMD_C011_DEC_CHAN_SET_FGT = eCMD_C011_CMD_BASE + 0x182,
++ /* Note: 0x183 not implemented yet in Rev D main */
++ eCMD_C011_DEC_CHAN_SET_LASTPICTURE_PADDING = eCMD_C011_CMD_BASE + 0x183,
++
++ /* Decoder 7412 commands (7412-only) */
++ eCMD_C011_DEC_CHAN_SET_CONTENT_KEY = eCMD_C011_CMD_BASE + 0x190,
++ eCMD_C011_DEC_CHAN_SET_SESSION_KEY = eCMD_C011_CMD_BASE + 0x191,
++ eCMD_C011_DEC_CHAN_FMT_CHANGE_ACK = eCMD_C011_CMD_BASE + 0x192,
++
++ eCMD_C011_DEC_CHAN_CUSTOM_VIDOUT = eCMD_C011_CMD_BASE + 0x1FF,
++
++ /* Encoding commands */
++ eCMD_C011_ENC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x200,
++ eCMD_C011_ENC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x201,
++ eCMD_C011_ENC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x202,
++ eCMD_C011_ENC_CHAN_CONTROL = eCMD_C011_CMD_BASE + 0x203,
++ eCMD_C011_ENC_CHAN_STATISTICS = eCMD_C011_CMD_BASE + 0x204,
++
++ eNOTIFY_C011_ENC_CHAN_EVENT = eCMD_C011_CMD_BASE + 0x210,
++
++} eC011_TS_CMD;
++
++#endif
+diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
+new file mode 100644
+index 0000000..01819d3
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_hw.c
+@@ -0,0 +1,2395 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_hw.c
++ *
++ * Description:
++ * BCM70010 Linux driver HW layer.
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include "crystalhd_hw.h"
++
++/* Functions internal to this file */
++
++static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
++{
++ bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
++ bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
++}
++
++
++static void crystalhd_start_dram(struct crystalhd_adp *adp)
++{
++ bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) |
++ /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */
++ ((15 / 5 - 1) << 7) | /* trp */
++ ((10 / 5 - 1) << 10) | /* trrd */
++ ((15 / 5 + 1) << 12) | /* twr */
++ ((2 + 1) << 16) | /* twtr */
++ ((70 / 5 - 2) << 19) | /* trfc */
++ (0 << 23));
++
++ bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
++ bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
++ bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
++ bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
++ bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
++ bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
++ bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
++ /* setting the refresh rate here */
++ bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
++}
++
++
++static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
++{
++ link_misc_perst_deco_ctrl rst_deco_cntrl;
++ link_misc_perst_clk_ctrl rst_clk_cntrl;
++ uint32_t temp;
++
++ /*
++	 * Link clocks: MISC_PERST_CLOCK_CTRL
++	 * Clear the PLL power-down bit and delay to allow the PLL to lock,
++	 * then clear the alternate-clock and stop-clock bits.
++ */
++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
++ rst_clk_cntrl.pll_pwr_dn = 0;
++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
++ msleep_interruptible(50);
++
++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
++ rst_clk_cntrl.stop_core_clk = 0;
++ rst_clk_cntrl.sel_alt_clk = 0;
++
++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
++ msleep_interruptible(50);
++
++ /*
++ * Bus Arbiter Timeout: GISB_ARBITER_TIMER
++ * Set internal bus arbiter timeout to 40us based on core clock speed
++ * (63MHz * 40us = 0x9D8)
++ */
++ crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);
++
++ /*
++ * Decoder clocks: MISC_PERST_DECODER_CTRL
++ * Enable clocks while 7412 reset is asserted, delay
++ * De-assert 7412 reset
++ */
++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
++ rst_deco_cntrl.stop_bcm_7412_clk = 0;
++ rst_deco_cntrl.bcm7412_rst = 1;
++ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
++ msleep_interruptible(10);
++
++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
++ rst_deco_cntrl.bcm7412_rst = 0;
++ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
++ msleep_interruptible(50);
++
++	/* Write 0 to OTP_CONTENT_MISC to disable all secure modes */
++ crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);
++
++ /* Clear bit 29 of 0x404 */
++ temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
++ temp &= ~BC_BIT(29);
++ crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
++
++ /* 2.5V regulator must be set to 2.6 volts (+6%) */
++ /* FIXME: jarod: what's the point of this reg read? */
++ temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
++ crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);
++
++ return true;
++}
++
++static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
++{
++ link_misc_perst_deco_ctrl rst_deco_cntrl;
++ link_misc_perst_clk_ctrl rst_clk_cntrl;
++ uint32_t temp;
++
++ /*
++ * Decoder clocks: MISC_PERST_DECODER_CTRL
++ * Assert 7412 reset, delay
++ * Assert 7412 stop clock
++ */
++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
++ rst_deco_cntrl.stop_bcm_7412_clk = 1;
++ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
++ msleep_interruptible(50);
++
++ /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
++ * Set internal bus arbiter timeout to 40us based on core clock speed
++ * (6.75MHZ * 40us = 0x10E)
++ */
++ crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);
++
++ /* Link clocks: MISC_PERST_CLOCK_CTRL
++ * Stop core clk, delay
++ * Set alternate clk, delay, set PLL power down
++ */
++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
++ rst_clk_cntrl.stop_core_clk = 1;
++ rst_clk_cntrl.sel_alt_clk = 1;
++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
++ msleep_interruptible(50);
++
++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
++ rst_clk_cntrl.pll_pwr_dn = 1;
++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
++
++ /*
++ * Read and restore the Transaction Configuration Register
++ * after core reset
++ */
++ temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
++
++ /*
++ * Link core soft reset: MISC3_RESET_CTRL
++ * - Write BIT[0]=1 and read it back for core reset to take place
++ */
++ crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
++ msleep_interruptible(50);
++
++ /* restore the transaction configuration register */
++ crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
++
++ return true;
++}
++
++static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
++{
++ intr_mask_reg intr_mask;
++ intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
++ intr_mask.mask_pcie_err = 1;
++ intr_mask.mask_pcie_rbusmast_err = 1;
++ intr_mask.mask_pcie_rgr_bridge = 1;
++ intr_mask.mask_rx_done = 1;
++ intr_mask.mask_rx_err = 1;
++ intr_mask.mask_tx_done = 1;
++ intr_mask.mask_tx_err = 1;
++ crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
++
++ return;
++}
++
++static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
++{
++ intr_mask_reg intr_mask;
++ intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
++ intr_mask.mask_pcie_err = 1;
++ intr_mask.mask_pcie_rbusmast_err = 1;
++ intr_mask.mask_pcie_rgr_bridge = 1;
++ intr_mask.mask_rx_done = 1;
++ intr_mask.mask_rx_err = 1;
++ intr_mask.mask_tx_done = 1;
++ intr_mask.mask_tx_err = 1;
++ crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
++
++ return;
++}
++
++static void crystalhd_clear_errors(struct crystalhd_adp *adp)
++{
++ uint32_t reg;
++
++ /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */
++ reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
++ if (reg)
++ crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
++
++ reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
++ if (reg)
++ crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
++
++ reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
++ if (reg)
++ crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
++}
++
++static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
++{
++ uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
++
++ if (intr_sts) {
++ crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
++
++ /* Write End Of Interrupt for PCIE */
++ crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
++ }
++}
++
++static void crystalhd_soft_rst(struct crystalhd_adp *adp)
++{
++ uint32_t val;
++
++ /* Assert c011 soft reset*/
++ bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
++ msleep_interruptible(50);
++
++ /* Release c011 soft reset*/
++ bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);
++
++ /* Disable Stuffing..*/
++ val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
++ val |= BC_BIT(8);
++ crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
++}
++
++static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
++{
++ uint32_t i = 0, reg;
++
++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
++
++ crystalhd_reg_wr(adp, AES_CMD, 0);
++ crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
++ crystalhd_reg_wr(adp, AES_CMD, 0x1);
++
++ /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
++ for (i = 0; i < 100; ++i) {
++ reg = crystalhd_reg_rd(adp, AES_STATUS);
++ if (reg & 0x1)
++ return true;
++ msleep_interruptible(10);
++ }
++
++ return false;
++}
++
++
++static bool crystalhd_start_device(struct crystalhd_adp *adp)
++{
++ uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
++
++ BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");
++
++ reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
++ reg_pwrmgmt &= ~ASPM_L1_ENABLE;
++
++ crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);
++
++ if (!crystalhd_bring_out_of_rst(adp)) {
++ BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
++ return false;
++ }
++
++ crystalhd_disable_interrupts(adp);
++
++ crystalhd_clear_errors(adp);
++
++ crystalhd_clear_interrupts(adp);
++
++ crystalhd_enable_interrupts(adp);
++
++ /* Enable the option for getting the total no. of DWORDS
++	 * that have been transferred by the RXDMA engine
++ */
++ dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
++ dbg_options |= 0x10;
++ crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);
++
++ /* Enable PCI Global Control options */
++ glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
++ glb_cntrl |= 0x100;
++ glb_cntrl |= 0x8000;
++ crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);
++
++ crystalhd_enable_interrupts(adp);
++
++ crystalhd_soft_rst(adp);
++ crystalhd_start_dram(adp);
++ crystalhd_enable_uarts(adp);
++
++ return true;
++}
++
++static bool crystalhd_stop_device(struct crystalhd_adp *adp)
++{
++ uint32_t reg;
++
++ BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
++ /* Clear and disable interrupts */
++ crystalhd_disable_interrupts(adp);
++ crystalhd_clear_errors(adp);
++ crystalhd_clear_interrupts(adp);
++
++ if (!crystalhd_put_in_reset(adp))
++ BCMLOG_ERR("Failed to Put Link To Reset State\n");
++
++ reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
++ reg |= ASPM_L1_ENABLE;
++ crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);
++
++ /* Set PCI Clk Req */
++ reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
++ reg |= PCI_CLK_REQ_ENABLE;
++ crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);
++
++ return true;
++}
++
++static crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
++{
++ unsigned long flags = 0;
++ crystalhd_rx_dma_pkt *temp = NULL;
++
++ if (!hw)
++ return NULL;
++
++ spin_lock_irqsave(&hw->lock, flags);
++ temp = hw->rx_pkt_pool_head;
++ if (temp) {
++ hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
++ temp->dio_req = NULL;
++ temp->pkt_tag = 0;
++ temp->flags = 0;
++ }
++ spin_unlock_irqrestore(&hw->lock, flags);
++
++ return temp;
++}
++
++static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
++ crystalhd_rx_dma_pkt *pkt)
++{
++ unsigned long flags = 0;
++
++ if (!hw || !pkt)
++ return;
++
++ spin_lock_irqsave(&hw->lock, flags);
++ pkt->next = hw->rx_pkt_pool_head;
++ hw->rx_pkt_pool_head = pkt;
++ spin_unlock_irqrestore(&hw->lock, flags);
++}
++
++/*
++ * Call back from TX - IOQ deletion.
++ *
++ * This routine will release the TX DMA rings allocated
++ * during the setup-dma-rings interface.
++ *
++ * Memory is allocated on a per-DMA-ring basis. This is just
++ * a placeholder to be able to create the dio queues.
++ */
++static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
++{
++}
++
++/*
++ * Rx Packet release callback..
++ *
++ * Release All user mapped capture buffers and Our DMA packets
++ * back to our free pool. The actual cleanup of the DMA
++ * ring descriptors happen during dma ring release.
++ */
++static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
++{
++ struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
++ crystalhd_rx_dma_pkt *pkt = (crystalhd_rx_dma_pkt *)data;
++
++ if (!pkt || !hw) {
++ BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
++ return;
++ }
++
++ if (pkt->dio_req)
++ crystalhd_unmap_dio(hw->adp, pkt->dio_req);
++ else
++ BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
++
++ crystalhd_hw_free_rx_pkt(hw, pkt);
++}
++
++#define crystalhd_hw_delete_ioq(adp, q) \
++ if (q) { \
++ crystalhd_delete_dioq(adp, q); \
++ q = NULL; \
++ }
++
++static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
++{
++ if (!hw)
++ return;
++
++ BCMLOG(BCMLOG_DBG, "Deleting IOQs \n");
++ crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
++ crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
++ crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
++ crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
++ crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
++}
++
++#define crystalhd_hw_create_ioq(sts, hw, q, cb) \
++do { \
++ sts = crystalhd_create_dioq(hw->adp, &q, cb, hw); \
++ if (sts != BC_STS_SUCCESS) \
++ goto hw_create_ioq_err; \
++} while (0)
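++
++/*
++ * On failure the macro above jumps to the caller's hw_create_ioq_err
++ * label, so crystalhd_hw_create_ioqs() below can unwind through a
++ * single cleanup path.
++ */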
++
++/*
++ * Create IOQs..
++ *
++ * TX - Active & Free
++ * RX - Active, Ready and Free.
++ */
++static BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
++ crystalhd_tx_desc_rel_call_back);
++ crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
++ crystalhd_tx_desc_rel_call_back);
++
++ crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
++ crystalhd_rx_pkt_rel_call_back);
++ crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
++ crystalhd_rx_pkt_rel_call_back);
++ crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
++ crystalhd_rx_pkt_rel_call_back);
++
++ return sts;
++
++hw_create_ioq_err:
++ crystalhd_hw_delete_ioqs(hw);
++
++ return sts;
++}
++
++
++static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
++ bool b_188_byte_pkts, uint8_t flags)
++{
++ uint32_t base, end, writep, readp;
++ uint32_t cpbSize, cpbFullness, fifoSize;
++
++ if (flags & 0x02) { /* ASF Bit is set */
++ base = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
++ end = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
++ writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
++ readp = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
++ } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
++ base = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
++ end = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
++ writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
++ readp = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
++ } else {
++ base = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
++ end = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
++ writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
++ readp = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
++ }
++
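++	/* Occupied space in the coded-data buffer; the else branch below
++	 * handles a write pointer that has wrapped around the circular buffer.
++	 */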
++ cpbSize = end - base;
++ if (writep >= readp)
++ cpbFullness = writep - readp;
++ else
++ cpbFullness = (end - base) - (readp - writep);
++
++ fifoSize = cpbSize - cpbFullness;
++
++ if (fifoSize < BC_INFIFO_THRESHOLD)
++ return true;
++
++ if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
++ return true;
++
++ return false;
++}
++
++static BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
++ uint32_t list_id, BC_STATUS cs)
++{
++ tx_dma_pkt *tx_req;
++
++ if (!hw || !list_id) {
++ BCMLOG_ERR("Invalid Arg..\n");
++ return BC_STS_INV_ARG;
++ }
++
++ hw->pwr_lock--;
++
++ tx_req = (tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
++ if (!tx_req) {
++ if (cs != BC_STS_IO_USER_ABORT)
++ BCMLOG_ERR("Find and Fetch Did not find req\n");
++ return BC_STS_NO_DATA;
++ }
++
++ if (tx_req->call_back) {
++ tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
++ tx_req->dio_req = NULL;
++ tx_req->cb_event = NULL;
++ tx_req->call_back = NULL;
++ } else {
++ BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
++ tx_req->list_tag);
++ }
++
++ /* Now put back the tx_list back in FreeQ */
++ tx_req->list_tag = 0;
++
++ return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
++}
++
++static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
++{
++ uint32_t err_mask, tmp;
++ unsigned long flags = 0;
++
++ err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
++ MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
++ MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
++
++ if (!(err_sts & err_mask))
++ return false;
++
++ BCMLOG_ERR("Error on Tx-L0 %x \n", err_sts);
++
++ tmp = err_mask;
++
++ if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
++ tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
++
++ if (tmp) {
++ spin_lock_irqsave(&hw->lock, flags);
++ /* reset list index.*/
++ hw->tx_list_post_index = 0;
++ spin_unlock_irqrestore(&hw->lock, flags);
++ }
++
++ tmp = err_sts & err_mask;
++ crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
++
++ return true;
++}
++
++static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
++{
++ uint32_t err_mask, tmp;
++ unsigned long flags = 0;
++
++ err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
++ MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
++ MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
++
++ if (!(err_sts & err_mask))
++ return false;
++
++ BCMLOG_ERR("Error on Tx-L1 %x \n", err_sts);
++
++ tmp = err_mask;
++
++ if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
++ tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
++
++ if (tmp) {
++ spin_lock_irqsave(&hw->lock, flags);
++ /* reset list index.*/
++ hw->tx_list_post_index = 0;
++ spin_unlock_irqrestore(&hw->lock, flags);
++ }
++
++ tmp = err_sts & err_mask;
++ crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
++
++ return true;
++}
++
++static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
++{
++ uint32_t err_sts;
++
++ if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
++ BC_STS_SUCCESS);
++
++ if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
++ BC_STS_SUCCESS);
++
++ if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
++ INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
++ /* No error mask set.. */
++ return;
++ }
++
++ /* Handle Tx errors. */
++ err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);
++
++ if (crystalhd_tx_list0_handler(hw, err_sts))
++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
++ BC_STS_ERROR);
++
++ if (crystalhd_tx_list1_handler(hw, err_sts))
++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
++ BC_STS_ERROR);
++
++ hw->stats.tx_errors++;
++}
++
++static void crystalhd_hw_dump_desc(pdma_descriptor p_dma_desc,
++ uint32_t ul_desc_index, uint32_t cnt)
++{
++ uint32_t ix, ll = 0;
++
++ if (!p_dma_desc || !cnt)
++ return;
++
++ /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
++ * setting ll (log level, I presume) to non-zero? */
++ if (!ll)
++ return;
++
++ for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
++ BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
++ ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"),
++ ul_desc_index,
++ p_dma_desc[ul_desc_index].buff_addr_high,
++ p_dma_desc[ul_desc_index].buff_addr_low,
++ p_dma_desc[ul_desc_index].next_desc_addr_high,
++ p_dma_desc[ul_desc_index].next_desc_addr_low,
++ p_dma_desc[ul_desc_index].xfer_size,
++ p_dma_desc[ul_desc_index].intr_enable,
++ p_dma_desc[ul_desc_index].last_rec_indicator);
++ }
++
++}
++
++static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
++ dma_descriptor *desc,
++ dma_addr_t desc_paddr_base,
++ uint32_t sg_cnt, uint32_t sg_st_ix,
++ uint32_t sg_st_off, uint32_t xfr_sz)
++{
++ uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
++ dma_addr_t desc_phy_addr = desc_paddr_base;
++ addr_64 addr_temp;
++
++ if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
++ (!sg_cnt && !ioreq->uinfo.dir_tx)) {
++ BCMLOG_ERR("Invalid Args\n");
++ return BC_STS_INV_ARG;
++ }
++
++ for (ix = 0; ix < sg_cnt; ix++) {
++
++ /* Setup SGLE index. */
++ sg_ix = ix + sg_st_ix;
++
++ /* Get SGLE length */
++ len = crystalhd_get_sgle_len(ioreq, sg_ix);
++ if (len % 4) {
++ BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
++ return BC_STS_NOT_IMPL;
++ }
++ /* Setup DMA desc with Phy addr & Length at current index. */
++ addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
++ if (sg_ix == sg_st_ix) {
++ addr_temp.full_addr += sg_st_off;
++ len -= sg_st_off;
++ }
++ memset(&desc[ix], 0, sizeof(desc[ix]));
++ desc[ix].buff_addr_low = addr_temp.low_part;
++ desc[ix].buff_addr_high = addr_temp.high_part;
++ desc[ix].dma_dir = ioreq->uinfo.dir_tx;
++
++ /* Chain DMA descriptor. */
++ addr_temp.full_addr = desc_phy_addr + sizeof(dma_descriptor);
++ desc[ix].next_desc_addr_low = addr_temp.low_part;
++ desc[ix].next_desc_addr_high = addr_temp.high_part;
++
++ if ((count + len) > xfr_sz)
++ len = xfr_sz - count;
++
++ /* Debug.. */
++ if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
++ BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
++ len, ix, count, xfr_sz, sg_cnt);
++ return BC_STS_ERROR;
++ }
++ /* Length expects Multiple of 4 */
++ desc[ix].xfer_size = (len / 4);
++
++ crystalhd_hw_dump_desc(desc, ix, 1);
++
++ count += len;
++ desc_phy_addr += sizeof(dma_descriptor);
++ }
++
++ last_desc_ix = ix - 1;
++
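++	/* If the transfer length was not a multiple of 4, chain one extra
++	 * descriptor that moves a single dword from the request's fb
++	 * (fill-byte) buffer; fill_bytes records how many of those bytes
++	 * are padding.
++	 */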
++ if (ioreq->fb_size) {
++ memset(&desc[ix], 0, sizeof(desc[ix]));
++ addr_temp.full_addr = ioreq->fb_pa;
++ desc[ix].buff_addr_low = addr_temp.low_part;
++ desc[ix].buff_addr_high = addr_temp.high_part;
++ desc[ix].dma_dir = ioreq->uinfo.dir_tx;
++ desc[ix].xfer_size = 1;
++ desc[ix].fill_bytes = 4 - ioreq->fb_size;
++ count += ioreq->fb_size;
++ last_desc_ix++;
++ }
++
++ /* setup last descriptor..*/
++ desc[last_desc_ix].last_rec_indicator = 1;
++ desc[last_desc_ix].next_desc_addr_low = 0;
++ desc[last_desc_ix].next_desc_addr_high = 0;
++ desc[last_desc_ix].intr_enable = 1;
++
++ crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
++
++ if (count != xfr_sz) {
++ BCMLOG_ERR("interal error sz curr:%x exp:%x\n", count, xfr_sz);
++ return BC_STS_ERROR;
++ }
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS crystalhd_xlat_sgl_to_dma_desc(crystalhd_dio_req *ioreq,
++ pdma_desc_mem pdesc_mem,
++ uint32_t *uv_desc_index)
++{
++ dma_descriptor *desc = NULL;
++ dma_addr_t desc_paddr_base = 0;
++ uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
++ uint32_t xfr_sz = 0;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ /* Check params.. */
++ if (!ioreq || !pdesc_mem || !uv_desc_index) {
++ BCMLOG_ERR("Invalid Args\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
++ !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
++ BCMLOG_ERR("Invalid Args\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
++ BCMLOG_ERR("UV offset for TX??\n");
++ return BC_STS_INV_ARG;
++
++ }
++
++ desc = pdesc_mem->pdma_desc_start;
++ desc_paddr_base = pdesc_mem->phy_addr;
++
++ if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
++ sg_cnt = ioreq->sg_cnt;
++ xfr_sz = ioreq->uinfo.xfr_len;
++ } else {
++ sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
++ xfr_sz = ioreq->uinfo.uv_offset;
++ }
++
++ sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
++ sg_st_ix, sg_st_off, xfr_sz);
++
++ if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
++ return sts;
++
++ /* Prepare for UV mapping.. */
++ desc = &pdesc_mem->pdma_desc_start[sg_cnt];
++ desc_paddr_base = pdesc_mem->phy_addr +
++ (sg_cnt * sizeof(dma_descriptor));
++
++ /* Done with desc addr.. now update sg stuff.*/
++ sg_cnt = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
++ xfr_sz = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
++ sg_st_ix = ioreq->uinfo.uv_sg_ix;
++ sg_st_off = ioreq->uinfo.uv_sg_off;
++
++ sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
++ sg_st_ix, sg_st_off, xfr_sz);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++
++ *uv_desc_index = sg_st_ix;
++
++ return sts;
++}
++
++static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
++{
++ uint32_t dma_cntrl;
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
++ if (!(dma_cntrl & DMA_START_BIT)) {
++ dma_cntrl |= DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
++ dma_cntrl);
++ }
++
++ return;
++}
++
++/* _CHECK_THIS_
++ *
++ * Verify if the Stop generates a completion interrupt or not.
++ * If it does not generate an interrupt, then add polling here.
++ */
++static BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
++{
++ uint32_t dma_cntrl, cnt = 30;
++ uint32_t l1 = 1, l2 = 1;
++ unsigned long flags = 0;
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
++
++ BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
++
++ if (!(dma_cntrl & DMA_START_BIT)) {
++ BCMLOG(BCMLOG_DBG, "Already Stopped\n");
++ return BC_STS_SUCCESS;
++ }
++
++ crystalhd_disable_interrupts(hw->adp);
++
++ /* Issue stop to HW */
++ /* This bit when set gave problems. Please check*/
++ dma_cntrl &= ~DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
++
++ BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");
++
++ /* Poll for 3 seconds (30 * 100ms) on both the lists..*/
++ while ((l1 || l2) && cnt) {
++
++ if (l1) {
++ l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
++ l1 &= DMA_START_BIT;
++ }
++
++ if (l2) {
++ l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
++ l2 &= DMA_START_BIT;
++ }
++
++ msleep_interruptible(100);
++
++ cnt--;
++ }
++
++ if (!cnt) {
++ BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
++ crystalhd_enable_interrupts(hw->adp);
++ return BC_STS_ERROR;
++ }
++
++ spin_lock_irqsave(&hw->lock, flags);
++ hw->tx_list_post_index = 0;
++ spin_unlock_irqrestore(&hw->lock, flags);
++ BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
++ crystalhd_enable_interrupts(hw->adp);
++
++ return BC_STS_SUCCESS;
++}
++
++static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
++{
++ /*
++ * Position of the PIB Entries can be found at
++ * 0th and the 1st location of the Circular list.
++ */
++ uint32_t Q_addr;
++ uint32_t pib_cnt, r_offset, w_offset;
++
++ Q_addr = hw->pib_del_Q_addr;
++
++ /* Get the Read Pointer */
++ crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
++
++ /* Get the Write Pointer */
++ crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
++
++ if (r_offset == w_offset)
++ return 0; /* Queue is empty */
++
++ if (w_offset > r_offset)
++ pib_cnt = w_offset - r_offset;
++ else
++ pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
++ (r_offset + MIN_PIB_Q_DEPTH);
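++ /*
++ * Wrap-around example (hypothetical offsets, with MIN_PIB_Q_DEPTH=2
++ * and MAX_PIB_Q_DEPTH=64): r_offset = 60, w_offset = 5 gives
++ * pib_cnt = (5 + 64) - (60 + 2) = 7 pending entries.
++ */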
++
++ if (pib_cnt > MAX_PIB_Q_DEPTH) {
++ BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
++ return 0;
++ }
++
++ return pib_cnt;
++}
++
++static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
++{
++ uint32_t Q_addr;
++ uint32_t addr_entry, r_offset, w_offset;
++
++ Q_addr = hw->pib_del_Q_addr;
++
++ /* Get the Read Pointer 0Th Location is Read Pointer */
++ crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
++
++ /* Get the Write Pointer 1st Location is Write pointer */
++ crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
++
++ /* Queue is empty */
++ if (r_offset == w_offset)
++ return 0;
++
++ if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
++ return 0;
++
++ /* Get the Actual Address of the PIB */
++ crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
++ 1, &addr_entry);
++
++ /* Increment the Read Pointer */
++ r_offset++;
++
++ if (MAX_PIB_Q_DEPTH == r_offset)
++ r_offset = MIN_PIB_Q_DEPTH;
++
++ /* Write back the read pointer to It's Location */
++ crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);
++
++ return addr_entry;
++}
++
++static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
++{
++ uint32_t Q_addr;
++ uint32_t r_offset, w_offset, n_offset;
++
++ Q_addr = hw->pib_rel_Q_addr;
++
++ /* Get the Read Pointer */
++ crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
++
++ /* Get the Write Pointer */
++ crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
++
++ if ((r_offset < MIN_PIB_Q_DEPTH) ||
++ (r_offset >= MAX_PIB_Q_DEPTH))
++ return false;
++
++ n_offset = w_offset + 1;
++
++ if (MAX_PIB_Q_DEPTH == n_offset)
++ n_offset = MIN_PIB_Q_DEPTH;
++
++ if (r_offset == n_offset)
++ return false; /* should never happen */
++
++ /* Write the DRAM ADDR to the Queue at Next Offset */
++ crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
++ 1, &addr_to_rel);
++
++ /* Put the New value of the write pointer in Queue */
++ crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);
++
++ return true;
++}
++
++static void cpy_pib_to_app(C011_PIB *src_pib, BC_PIC_INFO_BLOCK *dst_pib)
++{
++ if (!src_pib || !dst_pib) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return;
++ }
++
++ dst_pib->timeStamp = 0;
++ dst_pib->picture_number = src_pib->ppb.picture_number;
++ dst_pib->width = src_pib->ppb.width;
++ dst_pib->height = src_pib->ppb.height;
++ dst_pib->chroma_format = src_pib->ppb.chroma_format;
++ dst_pib->pulldown = src_pib->ppb.pulldown;
++ dst_pib->flags = src_pib->ppb.flags;
++ dst_pib->sess_num = src_pib->ptsStcOffset;
++ dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
++ dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
++ dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
++ dst_pib->frame_rate = src_pib->resolution;
++ return;
++}
++
++static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
++{
++ unsigned int cnt;
++ C011_PIB src_pib;
++ uint32_t pib_addr, pib_cnt;
++ BC_PIC_INFO_BLOCK *AppPib;
++ crystalhd_rx_dma_pkt *rx_pkt = NULL;
++
++ pib_cnt = crystalhd_get_pib_avail_cnt(hw);
++
++ if (!pib_cnt)
++ return;
++
++ for (cnt = 0; cnt < pib_cnt; cnt++) {
++
++ pib_addr = crystalhd_get_addr_from_pib_Q(hw);
++ crystalhd_mem_rd(hw->adp, pib_addr, sizeof(C011_PIB) / 4,
++ (uint32_t *)&src_pib);
++
++ if (src_pib.bFormatChange) {
++ rx_pkt = (crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
++ if (!rx_pkt)
++ return;
++ rx_pkt->flags = 0;
++ rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
++ AppPib = &rx_pkt->pib;
++ cpy_pib_to_app(&src_pib, AppPib);
++
++ BCMLOG(BCMLOG_DBG,
++ "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
++ rx_pkt->pib.picture_number,
++ rx_pkt->pib.aspect_ratio,
++ rx_pkt->pib.chroma_format,
++ rx_pkt->pib.colour_primaries,
++ rx_pkt->pib.frame_rate,
++ rx_pkt->pib.height,
++ rx_pkt->pib.width,
++ rx_pkt->pib.n_drop,
++ rx_pkt->pib.pulldown,
++ rx_pkt->pib.ycom);
++
++ crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
++
++ }
++
++ crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
++ }
++}
++
++static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
++{
++ uint32_t dma_cntrl;
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
++ if (!(dma_cntrl & DMA_START_BIT)) {
++ dma_cntrl |= DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
++ }
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
++ if (!(dma_cntrl & DMA_START_BIT)) {
++ dma_cntrl |= DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
++ }
++
++ return;
++}
++
++static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
++{
++ uint32_t dma_cntrl = 0, count = 30;
++ uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
++ if ((dma_cntrl & DMA_START_BIT)) {
++ dma_cntrl &= ~DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
++ }
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
++ if ((dma_cntrl & DMA_START_BIT)) {
++ dma_cntrl &= ~DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
++ }
++
++ /* Poll for 3 seconds (30 * 100ms) on both the lists..*/
++ while ((l0y || l0uv || l1y || l1uv) && count) {
++
++ if (l0y) {
++ l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
++ l0y &= DMA_START_BIT;
++ if (!l0y) {
++ hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
++ }
++ }
++
++ if (l1y) {
++ l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
++ l1y &= DMA_START_BIT;
++ if (!l1y) {
++ hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
++ }
++ }
++
++ if (l0uv) {
++ l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
++ l0uv &= DMA_START_BIT;
++ if (!l0uv) {
++ hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
++ }
++ }
++
++ if (l1uv) {
++ l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
++ l1uv &= DMA_START_BIT;
++ if (!l1uv) {
++ hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
++ }
++ }
++ msleep_interruptible(100);
++ count--;
++ }
++
++ hw->rx_list_post_index = 0;
++
++ BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
++ count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
++}
++
++static BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, crystalhd_rx_dma_pkt *rx_pkt)
++{
++ uint32_t y_low_addr_reg, y_high_addr_reg;
++ uint32_t uv_low_addr_reg, uv_high_addr_reg;
++ addr_64 desc_addr;
++ unsigned long flags;
++
++ if (!hw || !rx_pkt) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
++ BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
++ return BC_STS_INV_ARG;
++ }
++
++ spin_lock_irqsave(&hw->rx_lock, flags);
++ /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
++ if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
++ spin_unlock_irqrestore(&hw->rx_lock, flags);
++ return BC_STS_BUSY;
++ }
++
++ if (!hw->rx_list_post_index) {
++ y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
++ y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
++ uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
++ uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
++ } else {
++ y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
++ y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
++ uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
++ uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
++ }
++ rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
++ hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
++ if (rx_pkt->uv_phy_addr)
++ hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
++ hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
++ spin_unlock_irqrestore(&hw->rx_lock, flags);
++
++ crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);
++
++ crystalhd_start_rx_dma_engine(hw);
++ /* Program the Y descriptor */
++ desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
++ crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
++ crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);
++
++ if (rx_pkt->uv_phy_addr) {
++ /* Program the UV descriptor */
++ desc_addr.full_addr = rx_pkt->uv_phy_addr;
++ crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
++ crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
++ }
++
++ return BC_STS_SUCCESS;
++}
++
++static BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
++ crystalhd_rx_dma_pkt *rx_pkt)
++{
++ BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
++
++ if (sts == BC_STS_BUSY)
++ crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
++ false, rx_pkt->pkt_tag);
++
++ return sts;
++}
++
++static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
++ uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
++{
++ uint32_t y_dn_sz_reg, uv_dn_sz_reg;
++
++ if (!list_index) {
++ y_dn_sz_reg = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
++ uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
++ } else {
++ y_dn_sz_reg = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
++ uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
++ }
++
++ *y_dw_dnsz = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
++ *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
++}
++
++/*
++ * This function should be called only after making sure that the two DMA
++ * lists are free. This function does not check if DMA's are active, before
++ * turning off the DMA.
++ */
++static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
++{
++ uint32_t dma_cntrl, aspm;
++
++ hw->stop_pending = 0;
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
++ if (dma_cntrl & DMA_START_BIT) {
++ dma_cntrl &= ~DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
++ }
++
++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
++ if (dma_cntrl & DMA_START_BIT) {
++ dma_cntrl &= ~DMA_START_BIT;
++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
++ }
++ hw->rx_list_post_index = 0;
++
++ aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
++ aspm |= ASPM_L1_ENABLE;
++ /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
++ crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
++}
++
++static BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
++ BC_STATUS comp_sts)
++{
++ crystalhd_rx_dma_pkt *rx_pkt = NULL;
++ uint32_t y_dw_dnsz, uv_dw_dnsz;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ if (!hw || list_index >= DMA_ENGINE_CNT) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
++ hw->rx_pkt_tag_seed + list_index);
++ if (!rx_pkt) {
++ BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
++ hw->rx_list_post_index, hw->rx_list_sts[0],
++ hw->rx_list_sts[1], list_index,
++ hw->rx_pkt_tag_seed + list_index, comp_sts);
++ return BC_STS_INV_ARG;
++ }
++
++ if (comp_sts == BC_STS_SUCCESS) {
++ crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
++ rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
++ rx_pkt->flags = COMP_FLAG_DATA_VALID;
++ if (rx_pkt->uv_phy_addr)
++ rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
++ crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
++ hw->rx_pkt_tag_seed + list_index);
++ return sts;
++ }
++
++ /* Check if we can post this DIO again. */
++ return crystalhd_hw_post_cap_buff(hw, rx_pkt);
++}
++
++static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
++ uint32_t y_err_sts, uint32_t uv_err_sts)
++{
++ uint32_t tmp;
++ list_sts tmp_lsts;
++
++ if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
++ return false;
++
++ tmp_lsts = hw->rx_list_sts[0];
++
++ /* Y0 - DMA */
++ tmp = y_err_sts & GET_Y0_ERR_MSK;
++ if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
++ hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
++
++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
++ hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
++ }
++
++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
++ hw->rx_list_sts[0] &= ~rx_y_mask;
++ hw->rx_list_sts[0] |= rx_y_error;
++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
++ }
++
++ if (tmp) {
++ hw->rx_list_sts[0] &= ~rx_y_mask;
++ hw->rx_list_sts[0] |= rx_y_error;
++ hw->rx_list_post_index = 0;
++ }
++
++ /* UV0 - DMA */
++ tmp = uv_err_sts & GET_UV0_ERR_MSK;
++ if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
++ hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
++
++ if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
++ hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
++ }
++
++ if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
++ hw->rx_list_sts[0] &= ~rx_uv_mask;
++ hw->rx_list_sts[0] |= rx_uv_error;
++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
++ }
++
++ if (tmp) {
++ hw->rx_list_sts[0] &= ~rx_uv_mask;
++ hw->rx_list_sts[0] |= rx_uv_error;
++ hw->rx_list_post_index = 0;
++ }
++
++ if (y_err_sts & GET_Y0_ERR_MSK) {
++ tmp = y_err_sts & GET_Y0_ERR_MSK;
++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
++ }
++
++ if (uv_err_sts & GET_UV0_ERR_MSK) {
++ tmp = uv_err_sts & GET_UV0_ERR_MSK;
++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
++ }
++
++ return (tmp_lsts != hw->rx_list_sts[0]);
++}
++
++static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
++ uint32_t y_err_sts, uint32_t uv_err_sts)
++{
++ uint32_t tmp;
++ list_sts tmp_lsts;
++
++ if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
++ return false;
++
++ tmp_lsts = hw->rx_list_sts[1];
++
++ /* Y1 - DMA */
++ tmp = y_err_sts & GET_Y1_ERR_MSK;
++ if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
++ hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
++
++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
++ hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
++ }
++
++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
++ /* Add retry-support..*/
++ hw->rx_list_sts[1] &= ~rx_y_mask;
++ hw->rx_list_sts[1] |= rx_y_error;
++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
++ }
++
++ if (tmp) {
++ hw->rx_list_sts[1] &= ~rx_y_mask;
++ hw->rx_list_sts[1] |= rx_y_error;
++ hw->rx_list_post_index = 0;
++ }
++
++ /* UV1 - DMA */
++ tmp = uv_err_sts & GET_UV1_ERR_MSK;
++ if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK) {
++ hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
++ }
++
++ if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
++ hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
++ }
++
++ if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
++ /* Add retry-support*/
++ hw->rx_list_sts[1] &= ~rx_uv_mask;
++ hw->rx_list_sts[1] |= rx_uv_error;
++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
++ }
++
++ if (tmp) {
++ hw->rx_list_sts[1] &= ~rx_uv_mask;
++ hw->rx_list_sts[1] |= rx_uv_error;
++ hw->rx_list_post_index = 0;
++ }
++
++ if (y_err_sts & GET_Y1_ERR_MSK) {
++ tmp = y_err_sts & GET_Y1_ERR_MSK;
++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
++ }
++
++ if (uv_err_sts & GET_UV1_ERR_MSK) {
++ tmp = uv_err_sts & GET_UV1_ERR_MSK;
++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
++ }
++
++ return (tmp_lsts != hw->rx_list_sts[1]);
++}
++
++
++static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
++{
++ unsigned long flags;
++ uint32_t i, list_avail = 0;
++ BC_STATUS comp_sts = BC_STS_NO_DATA;
++ uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
++ bool ret = 0;
++
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return;
++ }
++
++ if (!(intr_sts & GET_RX_INTR_MASK))
++ return;
++
++ y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
++ uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);
++
++ for (i = 0; i < DMA_ENGINE_CNT; i++) {
++ /* Update States..*/
++ spin_lock_irqsave(&hw->rx_lock, flags);
++ if (i == 0)
++ ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
++ else
++ ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
++ if (ret) {
++ switch (hw->rx_list_sts[i]) {
++ case sts_free:
++ comp_sts = BC_STS_SUCCESS;
++ list_avail = 1;
++ break;
++ case rx_y_error:
++ case rx_uv_error:
++ case rx_sts_error:
++ /* We got an error on Y, UV, or both. */
++ hw->stats.rx_errors++;
++ crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
++ /* FIXME: jarod: this is where my mini pci-e card is tripping up */
++ BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
++ "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
++ i, hw->stats.rx_errors, y_err_sts,
++ uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
++ hw->rx_list_sts[i] = sts_free;
++ comp_sts = BC_STS_ERROR;
++ break;
++ default:
++ /* Wait for completion..*/
++ comp_sts = BC_STS_NO_DATA;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&hw->rx_lock, flags);
++
++ /* handle completion...*/
++ if (comp_sts != BC_STS_NO_DATA) {
++ crystalhd_rx_pkt_done(hw, i, comp_sts);
++ comp_sts = BC_STS_NO_DATA;
++ }
++ }
++
++ if (list_avail) {
++ if (hw->stop_pending) {
++ if ((hw->rx_list_sts[0] == sts_free) &&
++ (hw->rx_list_sts[1] == sts_free))
++ crystalhd_hw_finalize_pause(hw);
++ } else {
++ crystalhd_hw_start_capture(hw);
++ }
++ }
++}
++
++static BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
++ BC_FW_CMD *fw_cmd)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++ DecRspChannelStartVideo *st_rsp = NULL;
++
++ switch (fw_cmd->cmd[0]) {
++ case eCMD_C011_DEC_CHAN_START_VIDEO:
++ st_rsp = (DecRspChannelStartVideo *)fw_cmd->rsp;
++ hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
++ hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
++ BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
++ hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
++ break;
++ case eCMD_C011_INIT:
++ if (!(crystalhd_load_firmware_config(hw->adp))) {
++ BCMLOG_ERR("Invalid Params.\n");
++ sts = BC_STS_FW_AUTH_FAILED;
++ }
++ break;
++ default:
++ break;
++ }
++ return sts;
++}
++
++static BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
++{
++ uint32_t reg;
++ link_misc_perst_decoder_ctrl rst_cntrl_reg;
++
++ /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
++ rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
++
++ rst_cntrl_reg.bcm_7412_rst = 1;
++ crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
++ msleep_interruptible(50);
++
++ rst_cntrl_reg.bcm_7412_rst = 0;
++ crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
++
++ /* Close all banks, put DDR in idle */
++ bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
++
++ /* Set bit 25 (drop CKE pin of DDR) */
++ reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
++ reg |= 0x02000000;
++ bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);
++
++ /* Reset the audio block */
++ bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);
++
++ /* Power down Raptor PLL */
++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
++ reg |= 0x00008000;
++ bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);
++
++ /* Power down all Audio PLL */
++ bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);
++
++ /* Power down video clock (75MHz) */
++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
++ reg |= 0x00008000;
++ bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);
++
++ /* Power down video clock (75MHz) */
++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
++ reg |= 0x00008000;
++ bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);
++
++ /* Power down core clock (200MHz) */
++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
++ reg |= 0x00008000;
++ bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
++
++ /* Power down core clock (200MHz) */
++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
++ reg |= 0x00008000;
++ bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);
++
++ return BC_STS_SUCCESS;
++}
++
++/************************************************
++**
++*************************************************/
++
++BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
++{
++ uint32_t reg_data, cnt, *temp_buff;
++ uint32_t fw_sig_len = 36;
++ uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
++
++ BCMLOG_ENTER;
++
++ if (!adp || !buffer || !sz) {
++ BCMLOG_ERR("Invalid Params.\n");
++ return BC_STS_INV_ARG;
++ }
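++
++ /*
++ * Download sequence, as implemented below: verify the OTP is
++ * programmed, kick the DCI engine via DCI_CMD bit 0 and wait for
++ * the ready bit (DCI_STATUS bit 4), stream the image words through
++ * DCI_FIRMWARE_DATA while updating DCI_DRAM_BASE_ADDR, load the
++ * eight byte-swapped signature words into the DCI_SIGNATURE_DATA
++ * registers, then start verification with DCI_CMD bit 1 and poll
++ * DCI_STATUS for the result.
++ */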
++
++ reg_data = crystalhd_reg_rd(adp, OTP_CMD);
++ if (!(reg_data & 0x02)) {
++ BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
++ return BC_STS_ERROR;
++ }
++
++ reg_data = 0;
++ crystalhd_reg_wr(adp, DCI_CMD, 0);
++ reg_data |= BC_BIT(0);
++ crystalhd_reg_wr(adp, DCI_CMD, reg_data);
++
++ reg_data = 0;
++ cnt = 1000;
++ msleep_interruptible(10);
++
++ while (reg_data != BC_BIT(4)) {
++ reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
++ reg_data &= BC_BIT(4);
++ if (--cnt == 0) {
++ BCMLOG_ERR("Firmware Download RDY Timeout.\n");
++ return BC_STS_TIMEOUT;
++ }
++ }
++
++ msleep_interruptible(10);
++ /* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
++ crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
++ temp_buff = (uint32_t *)buffer;
++ for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
++ crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
++ dram_offset += 4;
++ temp_buff++;
++ }
++ msleep_interruptible(10);
++
++ temp_buff++;
++
++ sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
++ for (cnt = 0; cnt < 8; cnt++) {
++ uint32_t swapped_data = *temp_buff;
++ swapped_data = bswap_32_1(swapped_data);
++ crystalhd_reg_wr(adp, sig_reg, swapped_data);
++ sig_reg -= 4;
++ temp_buff++;
++ }
++ msleep_interruptible(10);
++
++ reg_data = 0;
++ reg_data |= BC_BIT(1);
++ crystalhd_reg_wr(adp, DCI_CMD, reg_data);
++ msleep_interruptible(10);
++
++ reg_data = 0;
++ reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
++
++ if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
++ cnt = 1000;
++ while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
++ reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
++ reg_data &= BC_BIT(0);
++ if (!(--cnt))
++ break;
++ msleep_interruptible(10);
++ }
++ reg_data = 0;
++ reg_data = crystalhd_reg_rd(adp, DCI_CMD);
++ reg_data |= BC_BIT(4);
++ crystalhd_reg_wr(adp, DCI_CMD, reg_data);
++
++ } else {
++ BCMLOG_ERR("F/w Signature mismatch\n");
++ return BC_STS_FW_AUTH_FAILED;
++ }
++
++ BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd)
++{
++ uint32_t cnt = 0, cmd_res_addr;
++ uint32_t *cmd_buff, *res_buff;
++ wait_queue_head_t fw_cmd_event;
++ int rc = 0;
++ BC_STATUS sts;
++
++ crystalhd_create_event(&fw_cmd_event);
++
++ BCMLOG_ENTER;
++
++ if (!hw || !fw_cmd) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ cmd_buff = fw_cmd->cmd;
++ res_buff = fw_cmd->rsp;
++
++ if (!cmd_buff || !res_buff) {
++ BCMLOG_ERR("Invalid Parameters for F/W Command \n");
++ return BC_STS_INV_ARG;
++ }
++
++ hw->pwr_lock++;
++
++ hw->fwcmd_evt_sts = 0;
++ hw->pfw_cmd_event = &fw_cmd_event;
++
++ /*Write the command to the memory*/
++ crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
++
++ /*Memory Read for memory arbitrator flush*/
++ crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
++
++ /* Write the command address to mailbox */
++ bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
++ msleep_interruptible(50);
++
++ crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);
++
++ if (!rc) {
++ sts = BC_STS_SUCCESS;
++ } else if (rc == -EBUSY) {
++ BCMLOG_ERR("Firmware command T/O\n");
++ sts = BC_STS_TIMEOUT;
++ } else if (rc == -EINTR) {
++ BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
++ sts = BC_STS_IO_USER_ABORT;
++ } else {
++ BCMLOG_ERR("FwCmd IO Error.\n");
++ sts = BC_STS_IO_ERROR;
++ }
++
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("FwCmd Failed.\n");
++ hw->pwr_lock--;
++ return sts;
++ }
++
++ /*Get the Response Address*/
++ cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
++
++ /*Read the Response*/
++ crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);
++
++ hw->pwr_lock--;
++
++ if (res_buff[2] != C011_RET_SUCCESS) {
++ BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
++ return BC_STS_FW_CMD_ERR;
++ }
++
++ sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
++ if (sts != BC_STS_SUCCESS)
++ BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
++
++ return sts;
++}
++
++bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
++{
++ uint32_t intr_sts = 0;
++ uint32_t deco_intr = 0;
++ bool rc = 0;
++
++ if (!adp || !hw->dev_started)
++ return rc;
++
++ hw->stats.num_interrupts++;
++ hw->pwr_lock++;
++
++ deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
++ intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
++
++ if (intr_sts) {
++ /* let system know we processed interrupt..*/
++ rc = 1;
++ hw->stats.dev_interrupts++;
++ }
++
++ if (deco_intr && (deco_intr != 0xdeaddead)) {
++
++ if (deco_intr & 0x80000000) {
++ /*Set the Event and the status flag*/
++ if (hw->pfw_cmd_event) {
++ hw->fwcmd_evt_sts = 1;
++ crystalhd_set_event(hw->pfw_cmd_event);
++ }
++ }
++
++ if (deco_intr & BC_BIT(1))
++ crystalhd_hw_proc_pib(hw);
++
++ bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
++ /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
++ bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
++ rc = 1;
++ }
++
++ /* Rx interrupts */
++ crystalhd_rx_isr(hw, intr_sts);
++
++ /* Tx interrupts*/
++ crystalhd_tx_isr(hw, intr_sts);
++
++ /* Clear interrupts */
++ if (rc) {
++ if (intr_sts)
++ crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
++
++ crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
++ }
++
++ hw->pwr_lock--;
++
++ return rc;
++}
++
++BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
++{
++ if (!hw || !adp) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (hw->dev_started)
++ return BC_STS_SUCCESS;
++
++ memset(hw, 0, sizeof(struct crystalhd_hw));
++
++ hw->adp = adp;
++ spin_lock_init(&hw->lock);
++ spin_lock_init(&hw->rx_lock);
++ /* FIXME: jarod: what are these magic numbers?!? */
++ hw->tx_ioq_tag_seed = 0x70023070;
++ hw->rx_pkt_tag_seed = 0x70029070;
++
++ hw->stop_pending = 0;
++ crystalhd_start_device(hw->adp);
++ hw->dev_started = true;
++
++ /* set initial core clock */
++ hw->core_clock_mhz = CLOCK_PRESET;
++ hw->prev_n = 0;
++ hw->pwr_lock = 0;
++ crystalhd_hw_set_core_clock(hw);
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
++{
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ if (!hw->dev_started)
++ return BC_STS_SUCCESS;
++
++ /* Stop and DDR sleep will happen in here */
++ crystalhd_hw_suspend(hw);
++ hw->dev_started = false;
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
++{
++ unsigned int i;
++ void *mem;
++ size_t mem_len;
++ dma_addr_t phy_addr;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ crystalhd_rx_dma_pkt *rpkt;
++
++ if (!hw || !hw->adp) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ sts = crystalhd_hw_create_ioqs(hw);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("Failed to create IOQs..\n");
++ return sts;
++ }
++
++ mem_len = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
++
++ for (i = 0; i < BC_TX_LIST_CNT; i++) {
++ mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
++ if (mem) {
++ memset(mem, 0, mem_len);
++ } else {
++ BCMLOG_ERR("Insufficient Memory For TX\n");
++ crystalhd_hw_free_dma_rings(hw);
++ return BC_STS_INSUFF_RES;
++ }
++ /* rx_pkt_pool -- static memory allocation */
++ hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
++ hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
++ hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
++ sizeof(dma_descriptor);
++ hw->tx_pkt_pool[i].list_tag = 0;
++
++ /* Add TX dma requests to Free Queue..*/
++ sts = crystalhd_dioq_add(hw->tx_freeq,
++ &hw->tx_pkt_pool[i], false, 0);
++ if (sts != BC_STS_SUCCESS) {
++ crystalhd_hw_free_dma_rings(hw);
++ return sts;
++ }
++ }
++
++ for (i = 0; i < BC_RX_LIST_CNT; i++) {
++ rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
++ if (!rpkt) {
++ BCMLOG_ERR("Insufficient Memory For RX\n");
++ crystalhd_hw_free_dma_rings(hw);
++ return BC_STS_INSUFF_RES;
++ }
++
++ mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
++ if (mem) {
++ memset(mem, 0, mem_len);
++ } else {
++ BCMLOG_ERR("Insufficient Memory For RX\n");
++ crystalhd_hw_free_dma_rings(hw);
++ return BC_STS_INSUFF_RES;
++ }
++ rpkt->desc_mem.pdma_desc_start = mem;
++ rpkt->desc_mem.phy_addr = phy_addr;
++ rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
++ rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
++ crystalhd_hw_free_rx_pkt(hw, rpkt);
++ }
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
++{
++ unsigned int i;
++ crystalhd_rx_dma_pkt *rpkt = NULL;
++
++ if (!hw || !hw->adp) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ /* Delete all IOQs.. */
++ crystalhd_hw_delete_ioqs(hw);
++
++ for (i = 0; i < BC_TX_LIST_CNT; i++) {
++ if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
++ bc_kern_dma_free(hw->adp,
++ hw->tx_pkt_pool[i].desc_mem.sz,
++ hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
++ hw->tx_pkt_pool[i].desc_mem.phy_addr);
++
++ hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
++ }
++ }
++
++ BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
++ do {
++ rpkt = crystalhd_hw_alloc_rx_pkt(hw);
++ if (!rpkt)
++ break;
++ bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
++ rpkt->desc_mem.pdma_desc_start,
++ rpkt->desc_mem.phy_addr);
++ kfree(rpkt);
++ } while (rpkt);
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq,
++ hw_comp_callback call_back,
++ wait_queue_head_t *cb_event, uint32_t *list_id,
++ uint8_t data_flags)
++{
++ tx_dma_pkt *tx_dma_packet = NULL;
++ uint32_t first_desc_u_addr, first_desc_l_addr;
++ uint32_t low_addr, high_addr;
++ addr_64 desc_addr;
++ BC_STATUS sts, add_sts;
++ uint32_t dummy_index = 0;
++ unsigned long flags;
++ bool rc;
++
++ if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ /*
++ * Since we hit code in busy condition very frequently,
++ * we will check the code in status first before
++ * checking the availability of free elem.
++ *
++ * This will avoid the Q fetch/add in normal condition.
++ */
++ rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
++ false, data_flags);
++ if (rc) {
++ hw->stats.cin_busy++;
++ return BC_STS_BUSY;
++ }
++
++ /* Get a list from TxFreeQ */
++ tx_dma_packet = (tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
++ if (!tx_dma_packet) {
++ BCMLOG_ERR("No empty elements..\n");
++ return BC_STS_ERR_USAGE;
++ }
++
++ sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
++ &tx_dma_packet->desc_mem,
++ &dummy_index);
++ if (sts != BC_STS_SUCCESS) {
++ add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
++ false, 0);
++ if (add_sts != BC_STS_SUCCESS)
++ BCMLOG_ERR("double fault..\n");
++
++ return sts;
++ }
++
++ hw->pwr_lock++;
++
++ desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
++ low_addr = desc_addr.low_part;
++ high_addr = desc_addr.high_part;
++
++ tx_dma_packet->call_back = call_back;
++ tx_dma_packet->cb_event = cb_event;
++ tx_dma_packet->dio_req = ioreq;
++
++ spin_lock_irqsave(&hw->lock, flags);
++
++ if (hw->tx_list_post_index == 0) {
++ first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
++ first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
++ } else {
++ first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
++ first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
++ }
++
++ *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
++ hw->tx_list_post_index;
++
++ hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
++
++ spin_unlock_irqrestore(&hw->lock, flags);
++
++
++ /* Insert in Active Q..*/
++ crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
++ tx_dma_packet->list_tag);
++
++ /*
++ * Interrupt will come as soon as you write
++ * the valid bit. So be ready for that. All
++ * the initialization should happen before that.
++ */
++ crystalhd_start_tx_dma_engine(hw);
++ crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
++
++ crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
++ /* Be sure we set the valid bit ^^^^ */
++
++ return BC_STS_SUCCESS;
++}
++
++/*
++ * This is a force cancel and we are racing with ISR.
++ *
++ * Will try to remove the req from ActQ before ISR gets it.
++ * If ISR gets it first then the completion happens in the
++ * normal path and we will return _STS_NO_DATA from here.
++ *
++ * FIX_ME: Not Tested the actual condition..
++ */
++BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
++{
++ if (!hw || !list_id) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ crystalhd_stop_tx_dma_engine(hw);
++ crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
++ crystalhd_dio_req *ioreq, bool en_post)
++{
++ crystalhd_rx_dma_pkt *rpkt;
++ uint32_t tag, uv_desc_ix = 0;
++ BC_STATUS sts;
++
++ if (!hw || !ioreq) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ rpkt = crystalhd_hw_alloc_rx_pkt(hw);
++ if (!rpkt) {
++ BCMLOG_ERR("Insufficient resources\n");
++ return BC_STS_INSUFF_RES;
++ }
++
++ rpkt->dio_req = ioreq;
++ tag = rpkt->pkt_tag;
++
++ sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
++ if (sts != BC_STS_SUCCESS)
++ return sts;
++
++ rpkt->uv_phy_addr = 0;
++
++ /* Store the address of UV in the rx packet for post*/
++ if (uv_desc_ix)
++ rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
++ (sizeof(dma_descriptor) * (uv_desc_ix + 1));
++
++ if (en_post)
++ sts = crystalhd_hw_post_cap_buff(hw, rpkt);
++ else
++ sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
++
++ return sts;
++}
++
++BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
++ BC_PIC_INFO_BLOCK *pib,
++ crystalhd_dio_req **ioreq)
++{
++ crystalhd_rx_dma_pkt *rpkt;
++ uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
++ uint32_t sig_pending = 0;
++
++
++ if (!hw || !ioreq || !pib) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
++ if (!rpkt) {
++ if (sig_pending) {
++ BCMLOG(BCMLOG_INFO, "wait on frame interrupted, sig_pending %d\n", sig_pending);
++ return BC_STS_IO_USER_ABORT;
++ } else {
++ return BC_STS_TIMEOUT;
++ }
++ }
++
++ rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
++
++ if (rpkt->flags & COMP_FLAG_PIB_VALID)
++ memcpy(pib, &rpkt->pib, sizeof(*pib));
++
++ *ioreq = rpkt->dio_req;
++
++ crystalhd_hw_free_rx_pkt(hw, rpkt);
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
++{
++ crystalhd_rx_dma_pkt *rx_pkt;
++ BC_STATUS sts;
++ uint32_t i;
++
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ /* This is start of capture.. Post to both the lists.. */
++ for (i = 0; i < DMA_ENGINE_CNT; i++) {
++ rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
++ if (!rx_pkt)
++ return BC_STS_NO_DATA;
++ sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
++ if (BC_STS_SUCCESS != sts)
++ break;
++
++ }
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
++{
++ void *temp = NULL;
++
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ crystalhd_stop_rx_dma_engine(hw);
++
++ do {
++ temp = crystalhd_dioq_fetch(hw->rx_freeq);
++ if (temp)
++ crystalhd_rx_pkt_rel_call_back(hw, temp);
++ } while (temp);
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
++{
++ hw->stats.pause_cnt++;
++ hw->stop_pending = 1;
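++ /*
++ * If either RX list is still busy, the pause completes later:
++ * crystalhd_rx_isr() calls crystalhd_hw_finalize_pause() once both
++ * lists return to sts_free.
++ */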
++
++ if ((hw->rx_list_sts[0] == sts_free) &&
++ (hw->rx_list_sts[1] == sts_free))
++ crystalhd_hw_finalize_pause(hw);
++
++ return BC_STS_SUCCESS;
++}
++
++BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
++{
++ BC_STATUS sts;
++ uint32_t aspm;
++
++ hw->stop_pending = 0;
++
++ aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
++ aspm &= ~ASPM_L1_ENABLE;
++/* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
++ crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
++
++ sts = crystalhd_hw_start_capture(hw);
++ return sts;
++}
++
++BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
++{
++ BC_STATUS sts;
++
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ sts = crystalhd_put_ddr2sleep(hw);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
++ return BC_STS_ERROR;
++ }
++
++ if (!crystalhd_stop_device(hw->adp)) {
++ BCMLOG_ERR("Failed to Stop Device!!\n");
++ return BC_STS_ERROR;
++ }
++
++ return BC_STS_SUCCESS;
++}
++
++void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
++{
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return;
++ }
++
++ /* if called w/NULL stats, it's a request to zero out the stats */
++ if (!stats) {
++ memset(&hw->stats, 0, sizeof(hw->stats));
++ return;
++ }
++
++ hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
++ hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq);
++ memcpy(stats, &hw->stats, sizeof(*stats));
++}
++
++BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
++{
++ uint32_t reg, n, i;
++ uint32_t vco_mg, refresh_reg;
++
++ if (!hw) {
++ BCMLOG_ERR("Invalid Arguments\n");
++ return BC_STS_INV_ARG;
++ }
++
++ /* FIXME: jarod: wha? */
++ /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
++ n = hw->core_clock_mhz/5;
++
++ if (n == hw->prev_n)
++ return BC_STS_CLK_NOCHG;
++
++ if (hw->pwr_lock > 0) {
++ /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
++ return BC_STS_CLK_NOCHG;
++ }
++
++ i = n * 27;
++ if (i < 560)
++ vco_mg = 0;
++ else if (i < 900)
++ vco_mg = 1;
++ else if (i < 1030)
++ vco_mg = 2;
++ else
++ vco_mg = 3;
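++
++ /*
++ * Worked example with the default CLOCK_PRESET of 175 MHz:
++ * n = 175 / 5 = 35, i = 35 * 27 = 945, so vco_mg = 2.
++ */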
++
++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
++
++ reg &= 0xFFFFCFC0;
++ reg |= n;
++ reg |= vco_mg << 12;
++
++ BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
++ hw->core_clock_mhz, n, vco_mg);
++
++ /* Change the DRAM refresh rate to accommodate the new frequency */
++ /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
++ refresh_reg = (7 * hw->core_clock_mhz / 16);
++ bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
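++ /*
++ * Continuing the 175 MHz example: refresh_reg = 7 * 175 / 16 = 76
++ * (0x4C), so the value written above is (1 << 12) | 0x4C = 0x104C.
++ */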
++
++ bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
++
++ i = 0;
++
++ for (i = 0; i < 10; i++) {
++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
++
++ if (reg & 0x00020000) {
++ hw->prev_n = n;
++ /* FIXME: jarod: outputting a random "C" is... confusing... */
++ BCMLOG(BCMLOG_INFO, "C");
++ return BC_STS_SUCCESS;
++ } else {
++ msleep_interruptible(10);
++ }
++ }
++ BCMLOG(BCMLOG_INFO, "clk change failed\n");
++ return BC_STS_CLK_NOCHG;
++}
+diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
+new file mode 100644
+index 0000000..1c6318e
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_hw.h
+@@ -0,0 +1,398 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_hw.h
++ *
++ * Description:
++ * BCM70012 Linux driver hardware layer.
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#ifndef _CRYSTALHD_HW_H_
++#define _CRYSTALHD_HW_H_
++
++#include "crystalhd_misc.h"
++#include "crystalhd_fw_if.h"
++
++/* HW constants..*/
++#define DMA_ENGINE_CNT 2
++#define MAX_PIB_Q_DEPTH 64
++#define MIN_PIB_Q_DEPTH 2
++#define WR_POINTER_OFF 4
++
++#define ASPM_L1_ENABLE (BC_BIT(27))
++
++/*************************************************
++ 7412 Decoder Registers.
++**************************************************/
++#define FW_CMD_BUFF_SZ 64
++#define TS_Host2CpuSnd 0x00000100
++#define Hst2CpuMbx1 0x00100F00
++#define Cpu2HstMbx1 0x00100F04
++#define MbxStat1 0x00100F08
++#define Stream2Host_Intr_Sts 0x00100F24
++#define C011_RET_SUCCESS 0x0 /* Return status of firmware command. */
++
++/* TS input status register */
++#define TS_StreamAFIFOStatus 0x0010044C
++#define TS_StreamBFIFOStatus 0x0010084C
++
++/*UART Selection definitions*/
++#define UartSelectA 0x00100300
++#define UartSelectB 0x00100304
++
++#define BSVS_UART_DEC_NONE 0x00
++#define BSVS_UART_DEC_OUTER 0x01
++#define BSVS_UART_DEC_INNER 0x02
++#define BSVS_UART_STREAM 0x03
++
++/* Code-In fifo */
++#define REG_DecCA_RegCinCTL 0xa00
++#define REG_DecCA_RegCinBase 0xa0c
++#define REG_DecCA_RegCinEnd 0xa10
++#define REG_DecCA_RegCinWrPtr 0xa04
++#define REG_DecCA_RegCinRdPtr 0xa08
++
++#define REG_Dec_TsUser0Base 0x100864
++#define REG_Dec_TsUser0Rdptr 0x100868
++#define REG_Dec_TsUser0Wrptr 0x10086C
++#define REG_Dec_TsUser0End 0x100874
++
++/* ASF Case ...*/
++#define REG_Dec_TsAudCDB2Base 0x10036c
++#define REG_Dec_TsAudCDB2Rdptr 0x100378
++#define REG_Dec_TsAudCDB2Wrptr 0x100374
++#define REG_Dec_TsAudCDB2End 0x100370
++
++/* DRAM bringup Registers */
++#define SDRAM_PARAM 0x00040804
++#define SDRAM_PRECHARGE 0x000408B0
++#define SDRAM_EXT_MODE 0x000408A4
++#define SDRAM_MODE 0x000408A0
++#define SDRAM_REFRESH 0x00040890
++#define SDRAM_REF_PARAM 0x00040808
++
++#define DecHt_PllACtl 0x34000C
++#define DecHt_PllBCtl 0x340010
++#define DecHt_PllCCtl 0x340014
++#define DecHt_PllDCtl 0x340034
++#define DecHt_PllECtl 0x340038
++#define AUD_DSP_MISC_SOFT_RESET 0x00240104
++#define AIO_MISC_PLL_RESET 0x0026000C
++#define PCIE_CLK_REQ_REG 0xDC
++#define PCI_CLK_REQ_ENABLE (BC_BIT(8))
++
++/*************************************************
++ F/W Copy engine definitions..
++**************************************************/
++#define BC_FWIMG_ST_ADDR 0x00000000
++/* FIXME: jarod: there's a kernel function that'll do this for us... */
++#define rotr32_1(x, n) (((x) >> n) | ((x) << (32 - n)))
++#define bswap_32_1(x) ((rotr32_1((x), 24) & 0x00ff00ff) | (rotr32_1((x), 8) & 0xff00ff00))
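++/*
++ * For reference, bswap_32_1(0x12345678) == 0x78563412, i.e. a full 32-bit
++ * byte swap (the in-kernel helper referred to above is swab32()).
++ */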
++
++#define DecHt_HostSwReset 0x340000
++#define BC_DRAM_FW_CFG_ADDR 0x001c2000
++
++typedef union _addr_64_ {
++ struct {
++ uint32_t low_part;
++ uint32_t high_part;
++ };
++
++ uint64_t full_addr;
++
++} addr_64;
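++
++/*
++ * Example: on a little-endian host, a dma_addr_t of 0x0000000123456780
++ * splits into low_part == 0x23456780 and high_part == 0x00000001; these
++ * parts are what get written to the *_FIRST_DESC_L/U_ADDR_LIST registers.
++ */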
++
++typedef union _intr_mask_reg_ {
++ struct {
++ uint32_t mask_tx_done:1;
++ uint32_t mask_tx_err:1;
++ uint32_t mask_rx_done:1;
++ uint32_t mask_rx_err:1;
++ uint32_t mask_pcie_err:1;
++ uint32_t mask_pcie_rbusmast_err:1;
++ uint32_t mask_pcie_rgr_bridge:1;
++ uint32_t reserved:25;
++ };
++
++ uint32_t whole_reg;
++
++} intr_mask_reg;
++
++typedef union _link_misc_perst_deco_ctrl_ {
++ struct {
++ uint32_t bcm7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/
++ uint32_t reserved0:3; /* Reserved.No Effect*/
++ uint32_t stop_bcm_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/
++ uint32_t reserved1:27; /* Reserved. No Effect */
++ };
++
++ uint32_t whole_reg;
++
++} link_misc_perst_deco_ctrl;
++
++typedef union _link_misc_perst_clk_ctrl_ {
++ struct {
++ uint32_t sel_alt_clk:1; /* When set, selects a 6.75MHz clock as the source of core_clk */
++ uint32_t stop_core_clk:1; /* When set, stops the branch of core_clk that is not needed for low power operation */
++ uint32_t pll_pwr_dn:1; /* When set, powers down the main PLL. The alternate clock bit should be set
++ to select an alternate clock before setting this bit.*/
++ uint32_t reserved0:5; /* Reserved */
++ uint32_t pll_mult:8; /* This setting controls the multiplier for the PLL. */
++ uint32_t pll_div:4; /* This setting controls the divider for the PLL. */
++ uint32_t reserved1:12; /* Reserved */
++ };
++
++ uint32_t whole_reg;
++
++} link_misc_perst_clk_ctrl;
++
++
++typedef union _link_misc_perst_decoder_ctrl_ {
++ struct {
++ uint32_t bcm_7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/
++ uint32_t res0:3; /* Reserved.No Effect*/
++ uint32_t stop_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/
++ uint32_t res1:27; /* Reserved. No Effect */
++ };
++
++ uint32_t whole_reg;
++
++} link_misc_perst_decoder_ctrl;
++
++
++typedef union _desc_low_addr_reg_ {
++ struct {
++ uint32_t list_valid:1;
++ uint32_t reserved:4;
++ uint32_t low_addr:27;
++ };
++
++ uint32_t whole_reg;
++
++} desc_low_addr_reg;
++
++typedef struct _dma_descriptor_ { /* 8 32-bit values */
++ /* 0th u32 */
++ uint32_t sdram_buff_addr:28; /* bits 0-27: SDRAM Address */
++ uint32_t res0:4; /* bits 28-31: Reserved */
++
++ /* 1st u32 */
++ uint32_t buff_addr_low; /* 1 buffer address low */
++ uint32_t buff_addr_high; /* 2 buffer address high */
++
++ /* 3rd u32 */
++ uint32_t res2:2; /* 0-1 - Reserved */
++ uint32_t xfer_size:23; /* 2-24 = Xfer size in words */
++ uint32_t res3:6; /* 25-30 reserved */
++ uint32_t intr_enable:1; /* 31 - Interrupt After this desc */
++
++ /* 4th u32 */
++ uint32_t endian_xlat_align:2; /* 0-1 Endian Translation */
++ uint32_t next_desc_cont:1; /* 2 - Next desc is in contig memory */
++ uint32_t res4:25; /* 3 - 27 Reserved bits */
++ uint32_t fill_bytes:2; /* 28-29 Bits Fill Bytes */
++ uint32_t dma_dir:1; /* 30 bit DMA Direction */
++ uint32_t last_rec_indicator:1; /* 31 bit Last Record Indicator */
++
++ /* 5th u32 */
++ uint32_t next_desc_addr_low; /* 32-bits Next Desc Addr lower */
++
++ /* 6th u32 */
++ uint32_t next_desc_addr_high; /* 32-bits Next Desc Addr Higher */
++
++ /* 7th u32 */
++ uint32_t res8; /* Last 32bits reserved */
++
++} dma_descriptor, *pdma_descriptor;
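++
++/*
++ * Note: xfer_size is expressed in 32-bit words, which is why
++ * crystalhd_hw_fill_desc() stores (len / 4) and requires the byte length
++ * to be a multiple of 4.
++ */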
++
++/*
++ * We will allocate the memory in 4K pages; the linked list will be a
++ * list of 32-byte descriptors.
++ * The virtual address will determine what should be freed.
++ */
++typedef struct _dma_desc_mem_ {
++ pdma_descriptor pdma_desc_start; /* 32-bytes for dma descriptor. should be first element */
++ dma_addr_t phy_addr; /* physical address of each DMA desc */
++ uint32_t sz;
++ struct _dma_desc_mem_ *Next; /* points to Next Descriptor in chain */
++
++} dma_desc_mem, *pdma_desc_mem;
++
++
++
++typedef enum _list_sts_ {
++ sts_free = 0,
++
++ /* RX-Y Bits 0:7 */
++ rx_waiting_y_intr = 0x00000001,
++ rx_y_error = 0x00000004,
++
++ /* RX-UV Bits 8:16 */
++ rx_waiting_uv_intr = 0x0000100,
++ rx_uv_error = 0x0000400,
++
++ rx_sts_waiting = (rx_waiting_y_intr|rx_waiting_uv_intr),
++ rx_sts_error = (rx_y_error|rx_uv_error),
++
++ rx_y_mask = 0x000000FF,
++ rx_uv_mask = 0x0000FF00,
++
++} list_sts;
++
++typedef struct _tx_dma_pkt_ {
++ dma_desc_mem desc_mem;
++ hw_comp_callback call_back;
++ crystalhd_dio_req *dio_req;
++ wait_queue_head_t *cb_event;
++ uint32_t list_tag;
++
++} tx_dma_pkt;
++
++typedef struct _crystalhd_rx_dma_pkt {
++ dma_desc_mem desc_mem;
++ crystalhd_dio_req *dio_req;
++ uint32_t pkt_tag;
++ uint32_t flags;
++ BC_PIC_INFO_BLOCK pib;
++ dma_addr_t uv_phy_addr;
++ struct _crystalhd_rx_dma_pkt *next;
++
++} crystalhd_rx_dma_pkt;
++
++struct crystalhd_hw_stats{
++ uint32_t rx_errors;
++ uint32_t tx_errors;
++ uint32_t freeq_count;
++ uint32_t rdyq_count;
++ uint32_t num_interrupts;
++ uint32_t dev_interrupts;
++ uint32_t cin_busy;
++ uint32_t pause_cnt;
++};
++
++struct crystalhd_hw {
++ tx_dma_pkt tx_pkt_pool[DMA_ENGINE_CNT];
++ spinlock_t lock;
++
++ uint32_t tx_ioq_tag_seed;
++ uint32_t tx_list_post_index;
++
++ crystalhd_rx_dma_pkt *rx_pkt_pool_head;
++ uint32_t rx_pkt_tag_seed;
++
++ bool dev_started;
++ void *adp;
++
++ wait_queue_head_t *pfw_cmd_event;
++ int fwcmd_evt_sts;
++
++ uint32_t pib_del_Q_addr;
++ uint32_t pib_rel_Q_addr;
++
++ crystalhd_dioq_t *tx_freeq;
++ crystalhd_dioq_t *tx_actq;
++
++ /* Rx DMA Engine Specific Locks */
++ spinlock_t rx_lock;
++ uint32_t rx_list_post_index;
++ list_sts rx_list_sts[DMA_ENGINE_CNT];
++ crystalhd_dioq_t *rx_rdyq;
++ crystalhd_dioq_t *rx_freeq;
++ crystalhd_dioq_t *rx_actq;
++ uint32_t stop_pending;
++
++ /* HW counters.. */
++ struct crystalhd_hw_stats stats;
++
++ /* Core clock in MHz */
++ uint32_t core_clock_mhz;
++ uint32_t prev_n;
++ uint32_t pwr_lock;
++};
++
++/* Clock defines for power control */
++#define CLOCK_PRESET 175
++
++/* DMA engine register BIT mask wrappers.. */
++#define DMA_START_BIT MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK
++
++#define GET_RX_INTR_MASK (INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_MASK | \
++ INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK | \
++ INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_MASK | \
++ INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK | \
++ INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_MASK | \
++ INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK | \
++ INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_MASK | \
++ INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
++
++#define GET_Y0_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \
++ MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \
++ MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \
++ MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK)
++
++#define GET_UV0_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \
++ MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \
++ MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \
++ MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK)
++
++#define GET_Y1_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \
++ MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \
++ MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \
++ MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK)
++
++#define GET_UV1_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \
++ MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \
++ MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \
++ MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK)
++
++
++/**** API Exposed to the other layers ****/
++BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp,
++ void *buffer, uint32_t sz);
++BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd);
++bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw);
++BC_STATUS crystalhd_hw_open(struct crystalhd_hw *, struct crystalhd_adp *);
++BC_STATUS crystalhd_hw_close(struct crystalhd_hw *);
++BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *);
++BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *);
++
++
++BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq,
++ hw_comp_callback call_back,
++ wait_queue_head_t *cb_event,
++ uint32_t *list_id, uint8_t data_flags);
++
++BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw);
++BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw);
++BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw);
++BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id);
++BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
++ crystalhd_dio_req *ioreq, bool en_post);
++BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
++ BC_PIC_INFO_BLOCK *pib,
++ crystalhd_dio_req **ioreq);
++BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw);
++BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw);
++void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats);
++
++/* API to program the core clock on the decoder */
++BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *);
++
++#endif
+diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
+new file mode 100644
+index 0000000..1f36b4d
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_lnx.c
+@@ -0,0 +1,780 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_lnx.c
++ *
++ * Description:
++ * BCM70010 Linux driver
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#include <linux/version.h>
++
++#include "crystalhd_lnx.h"
++
++static struct class *crystalhd_class;
++
++static struct crystalhd_adp *g_adp_info;
++
++static irqreturn_t chd_dec_isr(int irq, void *arg)
++{
++ struct crystalhd_adp *adp = (struct crystalhd_adp *) arg;
++ int rc = 0;
++ if (adp)
++ rc = crystalhd_cmd_interrupt(&adp->cmds);
++
++ return IRQ_RETVAL(rc);
++}
++
++static int chd_dec_enable_int(struct crystalhd_adp *adp)
++{
++ int rc = 0;
++
++ if (!adp || !adp->pdev) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return -EINVAL;
++ }
++
++ if (adp->pdev->msi_enabled)
++ adp->msi = 1;
++ else
++ adp->msi = pci_enable_msi(adp->pdev);
++
++ rc = request_irq(adp->pdev->irq, chd_dec_isr, IRQF_SHARED,
++ adp->name, (void *)adp);
++ if (rc) {
++ BCMLOG_ERR("Interrupt request failed.. \n");
++ pci_disable_msi(adp->pdev);
++ }
++
++ return rc;
++}
++
++static int chd_dec_disable_int(struct crystalhd_adp *adp)
++{
++ if (!adp || !adp->pdev) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return -EINVAL;
++ }
++
++ free_irq(adp->pdev->irq, adp);
++
++ if (adp->msi)
++ pci_disable_msi(adp->pdev);
++
++ return 0;
++}
++
++crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, bool isr)
++{
++ unsigned long flags = 0;
++ crystalhd_ioctl_data *temp;
++
++ if (!adp)
++ return NULL;
++
++ spin_lock_irqsave(&adp->lock, flags);
++
++ temp = adp->idata_free_head;
++ if (temp) {
++ adp->idata_free_head = adp->idata_free_head->next;
++ memset(temp, 0, sizeof(*temp));
++ }
++
++ spin_unlock_irqrestore(&adp->lock, flags);
++ return temp;
++}
++
++void chd_dec_free_iodata(struct crystalhd_adp *adp, crystalhd_ioctl_data *iodata,
++ bool isr)
++{
++ unsigned long flags = 0;
++
++ if (!adp || !iodata)
++ return;
++
++ spin_lock_irqsave(&adp->lock, flags);
++ iodata->next = adp->idata_free_head;
++ adp->idata_free_head = iodata;
++ spin_unlock_irqrestore(&adp->lock, flags);
++}
++
++static inline int crystalhd_user_data(unsigned long ud, void *dr, int size, int set)
++{
++ int rc;
++
++ if (!ud || !dr) {
++ BCMLOG_ERR("Invalid arg \n");
++ return -EINVAL;
++ }
++
++ if (set)
++ rc = copy_to_user((void *)ud, dr, size);
++ else
++ rc = copy_from_user(dr, (void *)ud, size);
++
++ if (rc) {
++ BCMLOG_ERR("Invalid args for command \n");
++ rc = -EFAULT;
++ }
++
++ return rc;
++}
++
++static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, crystalhd_ioctl_data *io,
++ uint32_t m_sz, unsigned long ua)
++{
++ unsigned long ua_off;
++ int rc = 0;
++
++ if (!adp || !io || !ua || !m_sz) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return -EINVAL;
++ }
++
++ io->add_cdata = vmalloc(m_sz);
++ if (!io->add_cdata) {
++ BCMLOG_ERR("kalloc fail for sz:%x\n", m_sz);
++ return -ENOMEM;
++ }
++
++ io->add_cdata_sz = m_sz;
++ ua_off = ua + sizeof(io->udata);
++ rc = crystalhd_user_data(ua_off, io->add_cdata, io->add_cdata_sz, 0);
++ if (rc) {
++ BCMLOG_ERR("failed to pull add_cdata sz:%x ua_off:%x\n",
++ io->add_cdata_sz, (unsigned int)ua_off);
++ if (io->add_cdata) {
++ vfree(io->add_cdata);
++ io->add_cdata = NULL;
++ }
++ return -ENODATA;
++ }
++
++ return rc;
++}
++
++static int chd_dec_release_cdata(struct crystalhd_adp *adp,
++ crystalhd_ioctl_data *io, unsigned long ua)
++{
++ unsigned long ua_off;
++ int rc;
++
++ if (!adp || !io || !ua) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return -EINVAL;
++ }
++
++ if (io->cmd != BCM_IOC_FW_DOWNLOAD) {
++ ua_off = ua + sizeof(io->udata);
++ rc = crystalhd_user_data(ua_off, io->add_cdata,
++ io->add_cdata_sz, 1);
++ if (rc) {
++ BCMLOG_ERR("failed to push add_cdata sz:%x ua_off:%x\n",
++ io->add_cdata_sz, (unsigned int)ua_off);
++ return -ENODATA;
++ }
++ }
++
++ if (io->add_cdata) {
++ vfree(io->add_cdata);
++ io->add_cdata = NULL;
++ }
++
++ return 0;
++}
++
++static int chd_dec_proc_user_data(struct crystalhd_adp *adp,
++ crystalhd_ioctl_data *io,
++ unsigned long ua, int set)
++{
++ int rc;
++ uint32_t m_sz = 0;
++
++ if (!adp || !io || !ua) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return -EINVAL;
++ }
++
++ rc = crystalhd_user_data(ua, &io->udata, sizeof(io->udata), set);
++ if (rc) {
++ BCMLOG_ERR("failed to %s iodata \n", (set ? "set" : "get"));
++ return rc;
++ }
++
++ switch (io->cmd) {
++ case BCM_IOC_MEM_RD:
++ case BCM_IOC_MEM_WR:
++ case BCM_IOC_FW_DOWNLOAD:
++ m_sz = io->udata.u.devMem.NumDwords * 4;
++ if (set)
++ rc = chd_dec_release_cdata(adp, io, ua);
++ else
++ rc = chd_dec_fetch_cdata(adp, io, m_sz, ua);
++ break;
++ default:
++ break;
++ }
++
++ return rc;
++}
++
++static int chd_dec_api_cmd(struct crystalhd_adp *adp, unsigned long ua,
++ uint32_t uid, uint32_t cmd, crystalhd_cmd_proc func)
++{
++ int rc;
++ crystalhd_ioctl_data *temp;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ temp = chd_dec_alloc_iodata(adp, 0);
++ if (!temp) {
++ BCMLOG_ERR("Failed to get iodata..\n");
++ return -EINVAL;
++ }
++
++ temp->u_id = uid;
++ temp->cmd = cmd;
++
++ rc = chd_dec_proc_user_data(adp, temp, ua, 0);
++ if (!rc) {
++ sts = func(&adp->cmds, temp);
++ if (sts == BC_STS_PENDING)
++ sts = BC_STS_NOT_IMPL;
++ temp->udata.RetSts = sts;
++ rc = chd_dec_proc_user_data(adp, temp, ua, 1);
++ }
++
++ if (temp) {
++ chd_dec_free_iodata(adp, temp, 0);
++ temp = NULL;
++ }
++
++ return rc;
++}
++
++/* ========================= API interfaces =================================*/
++static int chd_dec_ioctl(struct inode *in, struct file *fd,
++ unsigned int cmd, unsigned long ua)
++{
++ struct crystalhd_adp *adp = chd_get_adp();
++ crystalhd_cmd_proc cproc;
++ struct crystalhd_user *uc;
++
++ if (!adp || !fd) {
++ BCMLOG_ERR("Invalid adp\n");
++ return -EINVAL;
++ }
++
++ uc = (struct crystalhd_user *)fd->private_data;
++ if (!uc) {
++ BCMLOG_ERR("Failed to get uc\n");
++ return -ENODATA;
++ }
++
++ cproc = crystalhd_get_cmd_proc(&adp->cmds, cmd, uc);
++ if (!cproc) {
++ BCMLOG_ERR("Unhandled command: %d\n", cmd);
++ return -EINVAL;
++ }
++
++ return chd_dec_api_cmd(adp, ua, uc->uid, cmd, cproc);
++}
++
++static int chd_dec_open(struct inode *in, struct file *fd)
++{
++ struct crystalhd_adp *adp = chd_get_adp();
++ int rc = 0;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ struct crystalhd_user *uc = NULL;
++
++ BCMLOG_ENTER;
++ if (!adp) {
++ BCMLOG_ERR("Invalid adp\n");
++ return -EINVAL;
++ }
++
++ if (adp->cfg_users >= BC_LINK_MAX_OPENS) {
++ BCMLOG(BCMLOG_INFO, "Already in use.%d\n", adp->cfg_users);
++ return -EBUSY;
++ }
++
++ sts = crystalhd_user_open(&adp->cmds, &uc);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("cmd_user_open - %d \n", sts);
++ rc = -EBUSY;
++ }
++
++ adp->cfg_users++;
++
++ fd->private_data = uc;
++
++ return rc;
++}
++
++static int chd_dec_close(struct inode *in, struct file *fd)
++{
++ struct crystalhd_adp *adp = chd_get_adp();
++ struct crystalhd_user *uc;
++
++ BCMLOG_ENTER;
++ if (!adp) {
++ BCMLOG_ERR("Invalid adp \n");
++ return -EINVAL;
++ }
++
++ uc = (struct crystalhd_user *)fd->private_data;
++ if (!uc) {
++ BCMLOG_ERR("Failed to get uc\n");
++ return -ENODATA;
++ }
++
++ crystalhd_user_close(&adp->cmds, uc);
++
++ adp->cfg_users--;
++
++ return 0;
++}
++
++static const struct file_operations chd_dec_fops = {
++ .owner = THIS_MODULE,
++ .ioctl = chd_dec_ioctl,
++ .open = chd_dec_open,
++ .release = chd_dec_close,
++};
++
++static int chd_dec_init_chdev(struct crystalhd_adp *adp)
++{
++ crystalhd_ioctl_data *temp;
++ struct device *dev;
++ int rc = -ENODEV, i = 0;
++
++ if (!adp)
++ goto fail;
++
++ adp->chd_dec_major = register_chrdev(0, CRYSTALHD_API_NAME,
++ &chd_dec_fops);
++ if (adp->chd_dec_major < 0) {
++ BCMLOG_ERR("Failed to create config dev\n");
++ rc = adp->chd_dec_major;
++ goto fail;
++ }
++
++ /* register crystalhd class */
++ crystalhd_class = class_create(THIS_MODULE, "crystalhd");
++ if (IS_ERR(crystalhd_class)) {
++ BCMLOG_ERR("failed to create class\n");
++ goto fail;
++ }
++
++ dev = device_create(crystalhd_class, NULL, MKDEV(adp->chd_dec_major, 0),
++ NULL, "crystalhd");
++ if (!dev) {
++ BCMLOG_ERR("failed to create device\n");
++ goto device_create_fail;
++ }
++
++ rc = crystalhd_create_elem_pool(adp, BC_LINK_ELEM_POOL_SZ);
++ if (rc) {
++ BCMLOG_ERR("failed to create device\n");
++ goto elem_pool_fail;
++ }
++
++ /* Allocate general purpose ioctl pool. */
++ for (i = 0; i < CHD_IODATA_POOL_SZ; i++) {
++ /* FIXME: jarod: why atomic? */
++ temp = kzalloc(sizeof(crystalhd_ioctl_data), GFP_ATOMIC);
++ if (!temp) {
++ BCMLOG_ERR("ioctl data pool kzalloc failed\n");
++ rc = -ENOMEM;
++ goto kzalloc_fail;
++ }
++ /* Add to global pool.. */
++ chd_dec_free_iodata(adp, temp, 0);
++ }
++
++ return 0;
++
++kzalloc_fail:
++ crystalhd_delete_elem_pool(adp);
++elem_pool_fail:
++ device_destroy(crystalhd_class, MKDEV(adp->chd_dec_major, 0));
++device_create_fail:
++ class_destroy(crystalhd_class);
++fail:
++ return rc;
++}
++
++static void chd_dec_release_chdev(struct crystalhd_adp *adp)
++{
++ crystalhd_ioctl_data *temp = NULL;
++ if (!adp)
++ return;
++
++ if (adp->chd_dec_major > 0) {
++ /* unregister crystalhd class */
++ device_destroy(crystalhd_class, MKDEV(adp->chd_dec_major, 0));
++ unregister_chrdev(adp->chd_dec_major, CRYSTALHD_API_NAME);
++ BCMLOG(BCMLOG_INFO, "released api device - %d\n",
++ adp->chd_dec_major);
++ class_destroy(crystalhd_class);
++ }
++ adp->chd_dec_major = 0;
++
++ /* Clear iodata pool.. */
++ do {
++ temp = chd_dec_alloc_iodata(adp, 0);
++ if (temp)
++ kfree(temp);
++ } while (temp);
++
++ crystalhd_delete_elem_pool(adp);
++}
++
++static int chd_pci_reserve_mem(struct crystalhd_adp *pinfo)
++{
++ int rc;
++ unsigned long bar2 = pci_resource_start(pinfo->pdev, 2);
++ uint32_t mem_len = pci_resource_len(pinfo->pdev, 2);
++ unsigned long bar0 = pci_resource_start(pinfo->pdev, 0);
++ uint32_t i2o_len = pci_resource_len(pinfo->pdev, 0);
++
++ BCMLOG(BCMLOG_SSTEP, "bar2:0x%lx-0x%08x bar0:0x%lx-0x%08x\n",
++ bar2, mem_len, bar0, i2o_len);
++
++ rc = check_mem_region(bar2, mem_len);
++ if (rc) {
++ BCMLOG_ERR("No valid mem region...\n");
++ return -ENOMEM;
++ }
++
++ pinfo->addr = ioremap_nocache(bar2, mem_len);
++ if (!pinfo->addr) {
++ BCMLOG_ERR("Failed to remap mem region...\n");
++ return -ENOMEM;
++ }
++
++ pinfo->pci_mem_start = bar2;
++ pinfo->pci_mem_len = mem_len;
++
++ rc = check_mem_region(bar0, i2o_len);
++ if (rc) {
++ BCMLOG_ERR("No valid mem region...\n");
++ return -ENOMEM;
++ }
++
++ pinfo->i2o_addr = ioremap_nocache(bar0, i2o_len);
++ if (!pinfo->i2o_addr) {
++ BCMLOG_ERR("Failed to remap mem region...\n");
++ return -ENOMEM;
++ }
++
++ pinfo->pci_i2o_start = bar0;
++ pinfo->pci_i2o_len = i2o_len;
++
++ rc = pci_request_regions(pinfo->pdev, pinfo->name);
++ if (rc < 0) {
++ BCMLOG_ERR("Region request failed: %d\n", rc);
++ return rc;
++ }
++
++ BCMLOG(BCMLOG_SSTEP, "Mapped addr:0x%08lx i2o_addr:0x%08lx\n",
++ (unsigned long)pinfo->addr, (unsigned long)pinfo->i2o_addr);
++
++ return 0;
++}
++
++static void chd_pci_release_mem(struct crystalhd_adp *pinfo)
++{
++ if (!pinfo)
++ return;
++
++ if (pinfo->addr)
++ iounmap(pinfo->addr);
++
++ if (pinfo->i2o_addr)
++ iounmap(pinfo->i2o_addr);
++
++ pci_release_regions(pinfo->pdev);
++}
++
++
++static void chd_dec_pci_remove(struct pci_dev *pdev)
++{
++ struct crystalhd_adp *pinfo;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ BCMLOG_ENTER;
++
++ pinfo = (struct crystalhd_adp *) pci_get_drvdata(pdev);
++ if (!pinfo) {
++ BCMLOG_ERR("could not get adp\n");
++ return;
++ }
++
++ sts = crystalhd_delete_cmd_context(&pinfo->cmds);
++ if (sts != BC_STS_SUCCESS)
++ BCMLOG_ERR("cmd delete :%d \n", sts);
++
++ chd_dec_release_chdev(pinfo);
++
++ chd_dec_disable_int(pinfo);
++
++ chd_pci_release_mem(pinfo);
++ pci_disable_device(pinfo->pdev);
++
++ kfree(pinfo);
++ g_adp_info = NULL;
++}
++
++static int chd_dec_pci_probe(struct pci_dev *pdev,
++ const struct pci_device_id *entry)
++{
++ struct crystalhd_adp *pinfo;
++ int rc;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x "
++ "s_vendor:0x%04x s_device: 0x%04x\n",
++ pdev->vendor, pdev->device, pdev->subsystem_vendor,
++ pdev->subsystem_device);
++
++ /* FIXME: jarod: why atomic? */
++ pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_ATOMIC);
++ if (!pinfo) {
++ BCMLOG_ERR("Failed to allocate memory\n");
++ return -ENOMEM;
++ }
++
++ pinfo->pdev = pdev;
++
++ rc = pci_enable_device(pdev);
++ if (rc) {
++ BCMLOG_ERR("Failed to enable PCI device\n");
++ return rc;
++ }
++
++ snprintf(pinfo->name, 31, "crystalhd_pci_e:%d:%d:%d",
++ pdev->bus->number, PCI_SLOT(pdev->devfn),
++ PCI_FUNC(pdev->devfn));
++
++ rc = chd_pci_reserve_mem(pinfo);
++ if (rc) {
++ BCMLOG_ERR("Failed to setup memory regions.\n");
++ return -ENOMEM;
++ }
++
++ pinfo->present = 1;
++ pinfo->drv_data = entry->driver_data;
++
++ /* Setup adapter level lock.. */
++ spin_lock_init(&pinfo->lock);
++
++ /* setup api stuff.. */
++ chd_dec_init_chdev(pinfo);
++ rc = chd_dec_enable_int(pinfo);
++ if (rc) {
++ BCMLOG_ERR("_enable_int err:%d \n", rc);
++ pci_disable_device(pdev);
++ return -ENODEV;
++ }
++
++ /* Set dma mask... */
++ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
++ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
++ pinfo->dmabits = 64;
++ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
++ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++ pinfo->dmabits = 32;
++ } else {
++ BCMLOG_ERR("Unabled to setup DMA %d\n", rc);
++ pci_disable_device(pdev);
++ return -ENODEV;
++ }
++
++ sts = crystalhd_setup_cmd_context(&pinfo->cmds, pinfo);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("cmd setup :%d \n", sts);
++ pci_disable_device(pdev);
++ return -ENODEV;
++ }
++
++ pci_set_master(pdev);
++
++ pci_set_drvdata(pdev, pinfo);
++
++ g_adp_info = pinfo;
++
++ return 0;
++
++}
++
++#ifdef CONFIG_PM
++int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct crystalhd_adp *adp;
++ crystalhd_ioctl_data *temp;
++ BC_STATUS sts = BC_STS_SUCCESS;
++
++ adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
++ if (!adp) {
++ BCMLOG_ERR("could not get adp\n");
++ return -ENODEV;
++ }
++
++ temp = chd_dec_alloc_iodata(adp, false);
++ if (!temp) {
++ BCMLOG_ERR("could not get ioctl data\n");
++ return -ENODEV;
++ }
++
++ sts = crystalhd_suspend(&adp->cmds, temp);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("BCM70012 Suspend %d\n", sts);
++ return -ENODEV;
++ }
++
++ chd_dec_free_iodata(adp, temp, false);
++ chd_dec_disable_int(adp);
++ pci_save_state(pdev);
++
++ /* Disable IO/bus master/irq router */
++ pci_disable_device(pdev);
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ return 0;
++}
++
++int chd_dec_pci_resume(struct pci_dev *pdev)
++{
++ struct crystalhd_adp *adp;
++ BC_STATUS sts = BC_STS_SUCCESS;
++ int rc;
++
++ adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
++ if (!adp) {
++ BCMLOG_ERR("could not get adp\n");
++ return -ENODEV;
++ }
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++
++ /* device's irq possibly is changed, driver should take care */
++ if (pci_enable_device(pdev)) {
++ BCMLOG_ERR("Failed to enable PCI device\n");
++ return 1;
++ }
++
++ pci_set_master(pdev);
++
++ rc = chd_dec_enable_int(adp);
++ if (rc) {
++ BCMLOG_ERR("_enable_int err:%d \n", rc);
++ pci_disable_device(pdev);
++ return -ENODEV;
++ }
++
++ sts = crystalhd_resume(&adp->cmds);
++ if (sts != BC_STS_SUCCESS) {
++ BCMLOG_ERR("BCM70012 Resume %d\n", sts);
++ pci_disable_device(pdev);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++#endif
++
++static struct pci_device_id chd_dec_pci_id_table[] = {
++/* vendor, device, subvendor, subdevice, class, classmask, driver_data */
++ { 0x14e4, 0x1612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
++ { 0, },
++};
++
++struct pci_driver bc_chd_70012_driver = {
++ .name = "Broadcom 70012 Decoder",
++ .probe = chd_dec_pci_probe,
++ .remove = chd_dec_pci_remove,
++ .id_table = chd_dec_pci_id_table,
++#ifdef CONFIG_PM
++ .suspend = chd_dec_pci_suspend,
++ .resume = chd_dec_pci_resume
++#endif
++};
++MODULE_DEVICE_TABLE(pci, chd_dec_pci_id_table);
++
++
++void chd_set_log_level(struct crystalhd_adp *adp, char *arg)
++{
++ if ((!arg) || (strlen(arg) < 3))
++ g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA;
++ else if (!strncmp(arg, "sstep", 5))
++ g_linklog_level = BCMLOG_INFO | BCMLOG_DATA | BCMLOG_DBG |
++ BCMLOG_SSTEP | BCMLOG_ERROR;
++ else if (!strncmp(arg, "info", 4))
++ g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA | BCMLOG_INFO;
++ else if (!strncmp(arg, "debug", 5))
++ g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA | BCMLOG_INFO |
++ BCMLOG_DBG;
++ else if (!strncmp(arg, "pball", 5))
++ g_linklog_level = 0xFFFFFFFF & ~(BCMLOG_SPINLOCK);
++ else if (!strncmp(arg, "silent", 6))
++ g_linklog_level = 0;
++ else
++ g_linklog_level = 0;
++}
++
++struct crystalhd_adp *chd_get_adp(void)
++{
++ return g_adp_info;
++}
++
++int __init chd_dec_module_init(void)
++{
++ int rc;
++
++ chd_set_log_level(NULL, "debug");
++ BCMLOG(BCMLOG_DATA, "Loading crystalhd %d.%d.%d \n",
++ crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev);
++
++ rc = pci_register_driver(&bc_chd_70012_driver);
++
++ if (rc < 0)
++ BCMLOG_ERR("Could not find any devices. err:%d \n", rc);
++
++ return rc;
++}
++
++void __exit chd_dec_module_cleanup(void)
++{
++ BCMLOG(BCMLOG_DATA, "unloading crystalhd %d.%d.%d \n",
++ crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev);
++
++ pci_unregister_driver(&bc_chd_70012_driver);
++}
++
++
++MODULE_AUTHOR("Naren Sankar <nsankar@broadcom.com>");
++MODULE_AUTHOR("Prasad Bolisetty <prasadb@broadcom.com>");
++MODULE_DESCRIPTION(CRYSTAL_HD_NAME);
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("bcm70012");
++
++module_init(chd_dec_module_init);
++module_exit(chd_dec_module_cleanup);
++
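Usage sketch for the ioctl-data pool helpers above: example_run_cmd() is a hypothetical name, not part of the driver; it simply mirrors the alloc/fill/dispatch/free pattern used by chd_dec_api_cmd() and chd_dec_pci_suspend().

/* Illustrative only: example_run_cmd() is made up; the pool helpers,
 * crystalhd_cmd_proc and BC_STATUS come from the driver headers above. */
static int example_run_cmd(struct crystalhd_adp *adp, uint32_t uid,
			   uint32_t cmd, crystalhd_cmd_proc func)
{
	crystalhd_ioctl_data *io;
	BC_STATUS sts;

	io = chd_dec_alloc_iodata(adp, false);	/* take a pre-allocated element */
	if (!io)
		return -ENOMEM;

	io->u_id = uid;
	io->cmd  = cmd;
	sts = func(&adp->cmds, io);		/* any crystalhd_cmd_proc handler */

	chd_dec_free_iodata(adp, io, false);	/* return the element to the pool */
	return (sts == BC_STS_SUCCESS) ? 0 : -EIO;
}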
+diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
+new file mode 100644
+index 0000000..d3f9fc4
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_lnx.h
+@@ -0,0 +1,96 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_lnx.h
++ *
++ * Description:
++ * BCM70012 Linux driver
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#ifndef _CRYSTALHD_LNX_H_
++#define _CRYSTALHD_LNX_H_
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/pagemap.h>
++#include <linux/vmalloc.h>
++
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++
++#include "crystalhd_cmds.h"
++
++#define CRYSTAL_HD_NAME "Broadcom Crystal HD Decoder (BCM70012) Driver"
++
++
++/* OS specific PCI information structure and adapter information. */
++struct crystalhd_adp {
++ /* Hardware board/PCI specifics */
++ char name[32];
++ struct pci_dev *pdev;
++
++ unsigned long pci_mem_start;
++ uint32_t pci_mem_len;
++ void *addr;
++
++ unsigned long pci_i2o_start;
++ uint32_t pci_i2o_len;
++ void *i2o_addr;
++
++ unsigned int drv_data;
++ unsigned int dmabits; /* 32 | 64 */
++ unsigned int registered;
++ unsigned int present;
++ unsigned int msi;
++
++ spinlock_t lock;
++
++ /* API Related */
++ unsigned int chd_dec_major;
++ unsigned int cfg_users;
++
++ crystalhd_ioctl_data *idata_free_head; /* ioctl data pool */
++ crystalhd_elem_t *elem_pool_head; /* Queue element pool */
++
++ struct crystalhd_cmd cmds;
++
++ crystalhd_dio_req *ua_map_free_head;
++ struct pci_pool *fill_byte_pool;
++};
++
++
++struct crystalhd_adp *chd_get_adp(void);
++void chd_set_log_level(struct crystalhd_adp *adp, char *arg);
++
++#endif
++
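A minimal sketch of how the two helpers declared above might be paired; example_enable_debug_logging() is a made-up name, and "debug" is one of the level strings parsed by chd_set_log_level().

/* Illustrative only: pairs chd_get_adp() with chd_set_log_level(). */
static void example_enable_debug_logging(void)
{
	struct crystalhd_adp *adp = chd_get_adp();	/* global adapter, set at probe time */

	if (adp)
		chd_set_log_level(adp, "debug");	/* also: "info", "sstep", "pball", "silent" */
}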
+diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
+new file mode 100644
+index 0000000..32e632c
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_misc.c
+@@ -0,0 +1,1029 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_misc.c
++ *
++ * Description:
++ * BCM70012 Linux driver misc routines.
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#include "crystalhd_misc.h"
++#include "crystalhd_lnx.h"
++
++uint32_t g_linklog_level;
++
++static inline uint32_t crystalhd_dram_rd(struct crystalhd_adp *adp, uint32_t mem_off)
++{
++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19));
++ return bc_dec_reg_rd(adp, (0x00380000 | (mem_off & 0x0007FFFF)));
++}
++
++static inline void crystalhd_dram_wr(struct crystalhd_adp *adp, uint32_t mem_off, uint32_t val)
++{
++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19));
++ bc_dec_reg_wr(adp, (0x00380000 | (mem_off & 0x0007FFFF)), val);
++}
++
++static inline BC_STATUS bc_chk_dram_range(struct crystalhd_adp *adp, uint32_t start_off, uint32_t cnt)
++{
++ return BC_STS_SUCCESS;
++}
++
++static crystalhd_dio_req *crystalhd_alloc_dio(struct crystalhd_adp *adp)
++{
++ unsigned long flags = 0;
++ crystalhd_dio_req *temp = NULL;
++
++ if (!adp) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return temp;
++ }
++
++ spin_lock_irqsave(&adp->lock, flags);
++ temp = adp->ua_map_free_head;
++ if (temp)
++ adp->ua_map_free_head = adp->ua_map_free_head->next;
++ spin_unlock_irqrestore(&adp->lock, flags);
++
++ return temp;
++}
++
++static void crystalhd_free_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
++{
++ unsigned long flags = 0;
++
++ if (!adp || !dio)
++ return;
++ spin_lock_irqsave(&adp->lock, flags);
++ dio->sig = crystalhd_dio_inv;
++ dio->page_cnt = 0;
++ dio->fb_size = 0;
++ memset(&dio->uinfo, 0, sizeof(dio->uinfo));
++ dio->next = adp->ua_map_free_head;
++ adp->ua_map_free_head = dio;
++ spin_unlock_irqrestore(&adp->lock, flags);
++}
++
++static crystalhd_elem_t *crystalhd_alloc_elem(struct crystalhd_adp *adp)
++{
++ unsigned long flags = 0;
++ crystalhd_elem_t *temp = NULL;
++
++ if (!adp)
++ return temp;
++ spin_lock_irqsave(&adp->lock, flags);
++ temp = adp->elem_pool_head;
++ if (temp) {
++ adp->elem_pool_head = adp->elem_pool_head->flink;
++ memset(temp, 0, sizeof(*temp));
++ }
++ spin_unlock_irqrestore(&adp->lock, flags);
++
++ return temp;
++}
++static void crystalhd_free_elem(struct crystalhd_adp *adp, crystalhd_elem_t *elem)
++{
++ unsigned long flags = 0;
++
++ if (!adp || !elem)
++ return;
++ spin_lock_irqsave(&adp->lock, flags);
++ elem->flink = adp->elem_pool_head;
++ adp->elem_pool_head = elem;
++ spin_unlock_irqrestore(&adp->lock, flags);
++}
++
++static inline void crystalhd_set_sg(struct scatterlist *sg, struct page *page,
++ unsigned int len, unsigned int offset)
++{
++ sg_set_page(sg, page, len, offset);
++#ifdef CONFIG_X86_64
++ sg->dma_length = len;
++#endif
++}
++
++static inline void crystalhd_init_sg(struct scatterlist *sg, unsigned int entries)
++{
++ /* http://lkml.org/lkml/2007/11/27/68 */
++ sg_init_table(sg, entries);
++}
++
++/*========================== Extern ========================================*/
++/**
++ * bc_dec_reg_rd - Read 7412's device register.
++ * @adp: Adapter instance
++ * @reg_off: Register offset.
++ *
++ * Return:
++ * 32bit value read
++ *
++ * 7412's device register read routine. This interface uses
++ * 7412's device access range mapped from BAR-2 (4M) of PCIe
++ * configuration space.
++ */
++uint32_t bc_dec_reg_rd(struct crystalhd_adp *adp, uint32_t reg_off)
++{
++ if (!adp || (reg_off > adp->pci_mem_len)) {
++ BCMLOG_ERR("dec_rd_reg_off outof range: 0x%08x\n", reg_off);
++ return 0;
++ }
++
++ return readl(adp->addr + reg_off);
++}
++
++/**
++ * bc_dec_reg_wr - Write 7412's device register
++ * @adp: Adapter instance
++ * @reg_off: Register offset.
++ * @val: Dword value to be written.
++ *
++ * Return:
++ * none.
++ *
++ * 7412's device register write routine. This interface uses
++ * 7412's device access range mapped from BAR-2 (4M) of PCIe
++ * configuration space.
++ */
++void bc_dec_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val)
++{
++ if (!adp || (reg_off > adp->pci_mem_len)) {
++ BCMLOG_ERR("dec_wr_reg_off outof range: 0x%08x\n", reg_off);
++ return;
++ }
++ writel(val, adp->addr + reg_off);
++ udelay(8);
++}
++
++/**
++ * crystalhd_reg_rd - Read Link's device register.
++ * @adp: Adapter instance
++ * @reg_off: Register offset.
++ *
++ * Return:
++ * 32bit value read
++ *
++ * Link device register read routine. This interface uses
++ * Link's device access range mapped from BAR-1 (64K) of PCIe
++ * configuration space.
++ *
++ */
++uint32_t crystalhd_reg_rd(struct crystalhd_adp *adp, uint32_t reg_off)
++{
++ if (!adp || (reg_off > adp->pci_i2o_len)) {
++ BCMLOG_ERR("link_rd_reg_off outof range: 0x%08x\n", reg_off);
++ return 0;
++ }
++ return readl(adp->i2o_addr + reg_off);
++}
++
++/**
++ * crystalhd_reg_wr - Write Link's device register
++ * @adp: Adapter instance
++ * @reg_off: Register offset.
++ * @val: Dword value to be written.
++ *
++ * Return:
++ * none.
++ *
++ * Link device register write routine. This interface uses
++ * Link's device access range mapped from BAR-1 (64K) of PCIe
++ * configuration space.
++ *
++ */
++void crystalhd_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val)
++{
++ if (!adp || (reg_off > adp->pci_i2o_len)) {
++ BCMLOG_ERR("link_wr_reg_off outof range: 0x%08x\n", reg_off);
++ return;
++ }
++ writel(val, adp->i2o_addr + reg_off);
++}
++
++/**
++ * crystalhd_mem_rd - Read data from 7412's DRAM area.
++ * @adp: Adapter instance
++ * @start_off: Start offset.
++ * @dw_cnt: Count in dwords.
++ * @rd_buff: Buffer to copy the data from dram.
++ *
++ * Return:
++ * Status.
++ *
++ * 7412's Dram read routine.
++ */
++BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *adp, uint32_t start_off,
++ uint32_t dw_cnt, uint32_t *rd_buff)
++{
++ uint32_t ix = 0;
++
++ if (!adp || !rd_buff ||
++ (bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) {
++ BCMLOG_ERR("Invalid arg \n");
++ return BC_STS_INV_ARG;
++ }
++ for (ix = 0; ix < dw_cnt; ix++)
++ rd_buff[ix] = crystalhd_dram_rd(adp, (start_off + (ix * 4)));
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_mem_wr - Write data to 7412's DRAM area.
++ * @adp: Adapter instance
++ * @start_off: Start offset.
++ * @dw_cnt: Count in dwords.
++ * @wr_buff: Data Buffer to be written.
++ *
++ * Return:
++ * Status.
++ *
++ * 7412's Dram write routine.
++ */
++BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *adp, uint32_t start_off,
++ uint32_t dw_cnt, uint32_t *wr_buff)
++{
++ uint32_t ix = 0;
++
++ if (!adp || !wr_buff ||
++ (bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) {
++ BCMLOG_ERR("Invalid arg \n");
++ return BC_STS_INV_ARG;
++ }
++
++ for (ix = 0; ix < dw_cnt; ix++)
++ crystalhd_dram_wr(adp, (start_off + (ix * 4)), wr_buff[ix]);
++
++ return BC_STS_SUCCESS;
++}
++/**
++ * crystalhd_pci_cfg_rd - PCIe config read
++ * @adp: Adapter instance
++ * @off: PCI config space offset.
++ * @len: Size -- Byte, Word & dword.
++ * @val: Value read
++ *
++ * Return:
++ * Status.
++ *
++ * Get value from Link's PCIe config space.
++ */
++BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *adp, uint32_t off,
++ uint32_t len, uint32_t *val)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++ int rc = 0;
++
++ if (!adp || !val) {
++ BCMLOG_ERR("Invalid arg \n");
++ return BC_STS_INV_ARG;
++ }
++
++ switch (len) {
++ case 1:
++ rc = pci_read_config_byte(adp->pdev, off, (u8 *)val);
++ break;
++ case 2:
++ rc = pci_read_config_word(adp->pdev, off, (u16 *)val);
++ break;
++ case 4:
++ rc = pci_read_config_dword(adp->pdev, off, (u32 *)val);
++ break;
++ default:
++ rc = -EINVAL;
++ sts = BC_STS_INV_ARG;
++ BCMLOG_ERR("Invalid len:%d\n", len);
++ };
++
++ if (rc && (sts == BC_STS_SUCCESS))
++ sts = BC_STS_ERROR;
++
++ return sts;
++}
++
++/**
++ * crystalhd_pci_cfg_wr - PCIe config write
++ * @adp: Adapter instance
++ * @off: PCI config space offset.
++ * @len: Size -- Byte, Word & dword.
++ * @val: Value to be written
++ *
++ * Return:
++ * Status.
++ *
++ * Set value to Link's PCIe config space.
++ */
++BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *adp, uint32_t off,
++ uint32_t len, uint32_t val)
++{
++ BC_STATUS sts = BC_STS_SUCCESS;
++ int rc = 0;
++
++ if (!adp || !val) {
++ BCMLOG_ERR("Invalid arg \n");
++ return BC_STS_INV_ARG;
++ }
++
++ switch (len) {
++ case 1:
++ rc = pci_write_config_byte(adp->pdev, off, (u8)val);
++ break;
++ case 2:
++ rc = pci_write_config_word(adp->pdev, off, (u16)val);
++ break;
++ case 4:
++ rc = pci_write_config_dword(adp->pdev, off, val);
++ break;
++ default:
++ rc = -EINVAL;
++ sts = BC_STS_INV_ARG;
++ BCMLOG_ERR("Invalid len:%d\n", len);
++ };
++
++ if (rc && (sts == BC_STS_SUCCESS))
++ sts = BC_STS_ERROR;
++
++ return sts;
++}
++
++/**
++ * bc_kern_dma_alloc - Allocate memory for Dma rings
++ * @adp: Adapter instance
++ * @sz: Size of the memory to allocate.
++ * @phy_addr: Physical address of the memory allocated.
++ * Typedef to system's dma_addr_t (u64)
++ *
++ * Return:
++ * Pointer to allocated memory..
++ *
++ * Wrapper to Linux kernel interface.
++ *
++ */
++void *bc_kern_dma_alloc(struct crystalhd_adp *adp, uint32_t sz,
++ dma_addr_t *phy_addr)
++{
++ void *temp = NULL;
++
++ if (!adp || !sz || !phy_addr) {
++ BCMLOG_ERR("Invalide Arg..\n");
++ return temp;
++ }
++
++ temp = pci_alloc_consistent(adp->pdev, sz, phy_addr);
++ if (temp)
++ memset(temp, 0, sz);
++
++ return temp;
++}
++
++/**
++ * bc_kern_dma_free - Release Dma ring memory.
++ * @adp: Adapter instance
++ * @sz: Size of the memory to allocate.
++ * @ka: Kernel virtual address returned during _dio_alloc()
++ * @phy_addr: Physical address of the memory allocated.
++ * Typedef to system's dma_addr_t (u64)
++ *
++ * Return:
++ * none.
++ */
++void bc_kern_dma_free(struct crystalhd_adp *adp, uint32_t sz, void *ka,
++ dma_addr_t phy_addr)
++{
++ if (!adp || !ka || !sz || !phy_addr) {
++ BCMLOG_ERR("Invalide Arg..\n");
++ return;
++ }
++
++ pci_free_consistent(adp->pdev, sz, ka, phy_addr);
++}
++
++/**
++ * crystalhd_create_dioq - Create Generic DIO queue
++ * @adp: Adapter instance
++ * @dioq_hnd: Handle to the dio queue created
++ * @cb : Optional - Call back To free the element.
++ * @cbctx: Context to pass to callback.
++ *
++ * Return:
++ * status
++ *
++ * Initialize Generic DIO queue to hold any data. Callback
++ * will be used to free elements while deleting the queue.
++ */
++BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp,
++ crystalhd_dioq_t **dioq_hnd,
++ crystalhd_data_free_cb cb, void *cbctx)
++{
++ crystalhd_dioq_t *dioq = NULL;
++
++ if (!adp || !dioq_hnd) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ dioq = kzalloc(sizeof(*dioq), GFP_KERNEL);
++ if (!dioq)
++ return BC_STS_INSUFF_RES;
++
++ spin_lock_init(&dioq->lock);
++ dioq->sig = BC_LINK_DIOQ_SIG;
++ dioq->head = (crystalhd_elem_t *)&dioq->head;
++ dioq->tail = (crystalhd_elem_t *)&dioq->head;
++ crystalhd_create_event(&dioq->event);
++ dioq->adp = adp;
++ dioq->data_rel_cb = cb;
++ dioq->cb_context = cbctx;
++ *dioq_hnd = dioq;
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_delete_dioq - Delete Generic DIO queue
++ * @adp: Adapter instance
++ * @dioq: DIOQ instance..
++ *
++ * Return:
++ * None.
++ *
++ * Release Generic DIO queue. This function will remove
++ * all the entries from the Queue and will release data
++ * by calling the call back provided during creation.
++ *
++ */
++void crystalhd_delete_dioq(struct crystalhd_adp *adp, crystalhd_dioq_t *dioq)
++{
++ void *temp;
++
++ if (!dioq || (dioq->sig != BC_LINK_DIOQ_SIG))
++ return;
++
++ do {
++ temp = crystalhd_dioq_fetch(dioq);
++ if (temp && dioq->data_rel_cb)
++ dioq->data_rel_cb(dioq->cb_context, temp);
++ } while (temp);
++ dioq->sig = 0;
++ kfree(dioq);
++}
++
++/**
++ * crystalhd_dioq_add - Add new DIO request element.
++ * @ioq: DIO queue instance
++ * @data: Data element to be added.
++ * @wake: True - Wake up suspended process.
++ * @tag: Special tag to assign - For search and get.
++ *
++ * Return:
++ * Status.
++ *
++ * Insert new element to Q tail.
++ */
++BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data,
++ bool wake, uint32_t tag)
++{
++ unsigned long flags = 0;
++ crystalhd_elem_t *tmp;
++
++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !data) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ tmp = crystalhd_alloc_elem(ioq->adp);
++ if (!tmp) {
++ BCMLOG_ERR("No free elements.\n");
++ return BC_STS_INSUFF_RES;
++ }
++
++ tmp->data = data;
++ tmp->tag = tag;
++ spin_lock_irqsave(&ioq->lock, flags);
++ tmp->flink = (crystalhd_elem_t *)&ioq->head;
++ tmp->blink = ioq->tail;
++ tmp->flink->blink = tmp;
++ tmp->blink->flink = tmp;
++ ioq->count++;
++ spin_unlock_irqrestore(&ioq->lock, flags);
++
++ if (wake)
++ crystalhd_set_event(&ioq->event);
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_dioq_fetch - Fetch element from head.
++ * @ioq: DIO queue instance
++ *
++ * Return:
++ * data element from the head..
++ *
++ * Remove an element from Queue.
++ */
++void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq)
++{
++ unsigned long flags = 0;
++ crystalhd_elem_t *tmp;
++ crystalhd_elem_t *ret = NULL;
++ void *data = NULL;
++
++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return data;
++ }
++
++ spin_lock_irqsave(&ioq->lock, flags);
++ tmp = ioq->head;
++ if (tmp != (crystalhd_elem_t *)&ioq->head) {
++ ret = tmp;
++ tmp->flink->blink = tmp->blink;
++ tmp->blink->flink = tmp->flink;
++ ioq->count--;
++ }
++ spin_unlock_irqrestore(&ioq->lock, flags);
++ if (ret) {
++ data = ret->data;
++ crystalhd_free_elem(ioq->adp, ret);
++ }
++
++ return data;
++}
++/**
++ * crystalhd_dioq_find_and_fetch - Search the tag and Fetch element
++ * @ioq: DIO queue instance
++ * @tag: Tag to search for.
++ *
++ * Return:
++ * element from the head..
++ *
++ * Search TAG and remove the element.
++ */
++void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag)
++{
++ unsigned long flags = 0;
++ crystalhd_elem_t *tmp;
++ crystalhd_elem_t *ret = NULL;
++ void *data = NULL;
++
++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return data;
++ }
++
++ spin_lock_irqsave(&ioq->lock, flags);
++ tmp = ioq->head;
++ while (tmp != (crystalhd_elem_t *)&ioq->head) {
++ if (tmp->tag == tag) {
++ ret = tmp;
++ tmp->flink->blink = tmp->blink;
++ tmp->blink->flink = tmp->flink;
++ ioq->count--;
++ break;
++ }
++ tmp = tmp->flink;
++ }
++ spin_unlock_irqrestore(&ioq->lock, flags);
++
++ if (ret) {
++ data = ret->data;
++ crystalhd_free_elem(ioq->adp, ret);
++ }
++
++ return data;
++}
++
++/**
++ * crystalhd_dioq_fetch_wait - Fetch element from Head.
++ * @ioq: DIO queue instance
++ * @to_secs: Wait timeout in seconds..
++ *
++ * Return:
++ * element from the head..
++ *
++ * Return element from head if Q is not empty. Wait for new element
++ * if Q is empty for Timeout seconds.
++ */
++void *crystalhd_dioq_fetch_wait(crystalhd_dioq_t *ioq, uint32_t to_secs,
++ uint32_t *sig_pend)
++{
++ unsigned long flags = 0;
++ int rc = 0, count;
++ void *tmp = NULL;
++
++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) {
++ BCMLOG_ERR("Invalid arg!!\n");
++ return tmp;
++ }
++
++ count = to_secs;
++ spin_lock_irqsave(&ioq->lock, flags);
++ while ((ioq->count == 0) && count) {
++ spin_unlock_irqrestore(&ioq->lock, flags);
++
++ crystalhd_wait_on_event(&ioq->event, (ioq->count > 0), 1000, rc, 0);
++ if (rc == 0) {
++ goto out;
++ } else if (rc == -EINTR) {
++ BCMLOG(BCMLOG_INFO, "Cancelling fetch wait\n");
++ *sig_pend = 1;
++ return tmp;
++ }
++ spin_lock_irqsave(&ioq->lock, flags);
++ count--;
++ }
++ spin_unlock_irqrestore(&ioq->lock, flags);
++
++out:
++ return crystalhd_dioq_fetch(ioq);
++}
++
++/**
++ * crystalhd_map_dio - Map user address for DMA
++ * @adp: Adapter instance
++ * @ubuff: User buffer to map.
++ * @ubuff_sz: User buffer size.
++ * @uv_offset: UV buffer offset.
++ * @en_422mode: TRUE:422 FALSE:420 Capture mode.
++ * @dir_tx: TRUE for Tx (To device from host)
++ * @dio_hnd: Handle to mapped DIO request.
++ *
++ * Return:
++ * Status.
++ *
++ * This routine maps the user address and locks its pages for DMA.
++ *
++ */
++BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
++ uint32_t ubuff_sz, uint32_t uv_offset,
++ bool en_422mode, bool dir_tx,
++ crystalhd_dio_req **dio_hnd)
++{
++ crystalhd_dio_req *dio;
++ /* FIXME: jarod: should some of these unsigned longs be uint32_t or uintptr_t? */
++ unsigned long start = 0, end = 0, uaddr = 0, count = 0;
++ unsigned long spsz = 0, uv_start = 0;
++ int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;
++
++ if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
++ BCMLOG_ERR("Invalid arg \n");
++ return BC_STS_INV_ARG;
++ }
++ /* Compute pages */
++ uaddr = (unsigned long)ubuff;
++ count = (unsigned long)ubuff_sz;
++ end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = uaddr >> PAGE_SHIFT;
++ nr_pages = end - start;
++
++ if (!count || ((uaddr + count) < uaddr)) {
++ BCMLOG_ERR("User addr overflow!!\n");
++ return BC_STS_INV_ARG;
++ }
++
++ dio = crystalhd_alloc_dio(adp);
++ if (!dio) {
++ BCMLOG_ERR("dio pool empty..\n");
++ return BC_STS_INSUFF_RES;
++ }
++
++ if (dir_tx) {
++ rw = WRITE;
++ dio->direction = DMA_TO_DEVICE;
++ } else {
++ rw = READ;
++ dio->direction = DMA_FROM_DEVICE;
++ }
++
++ if (nr_pages > dio->max_pages) {
++ BCMLOG_ERR("max_pages(%d) exceeded(%d)!!\n",
++ dio->max_pages, nr_pages);
++ crystalhd_unmap_dio(adp, dio);
++ return BC_STS_INSUFF_RES;
++ }
++
++ if (uv_offset) {
++ uv_start = (uaddr + (unsigned long)uv_offset) >> PAGE_SHIFT;
++ dio->uinfo.uv_sg_ix = uv_start - start;
++ dio->uinfo.uv_sg_off = ((uaddr + (unsigned long)uv_offset) & ~PAGE_MASK);
++ }
++
++ dio->fb_size = ubuff_sz & 0x03;
++ if (dio->fb_size) {
++ res = copy_from_user(dio->fb_va,
++ (void *)(uaddr + count - dio->fb_size),
++ dio->fb_size);
++ if (res) {
++ BCMLOG_ERR("failed %d to copy %u fill bytes from %p\n",
++ res, dio->fb_size,
++ (void *)(uaddr + count-dio->fb_size));
++ crystalhd_unmap_dio(adp, dio);
++ return BC_STS_INSUFF_RES;
++ }
++ }
++
++ down_read(&current->mm->mmap_sem);
++ res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ,
++ 0, dio->pages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ /* Save for release..*/
++ dio->sig = crystalhd_dio_locked;
++ if (res < nr_pages) {
++ BCMLOG_ERR("get pages failed: %d-%d\n", nr_pages, res);
++ dio->page_cnt = res;
++ crystalhd_unmap_dio(adp, dio);
++ return BC_STS_ERROR;
++ }
++
++ dio->page_cnt = nr_pages;
++ /* Get scatter/gather */
++ crystalhd_init_sg(dio->sg, dio->page_cnt);
++ crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
++ if (nr_pages > 1) {
++ dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;
++
++#ifdef CONFIG_X86_64
++ dio->sg[0].dma_length = dio->sg[0].length;
++#endif
++ count -= dio->sg[0].length;
++ for (i = 1; i < nr_pages; i++) {
++ if (count < 4) {
++ spsz = count;
++ skip_fb_sg = 1;
++ } else {
++ spsz = (count < PAGE_SIZE) ?
++ (count & ~0x03) : PAGE_SIZE;
++ }
++ crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
++ count -= spsz;
++ }
++ } else {
++ if (count < 4) {
++ dio->sg[0].length = count;
++ skip_fb_sg = 1;
++ } else {
++ dio->sg[0].length = count - dio->fb_size;
++ }
++#ifdef CONFIG_X86_64
++ dio->sg[0].dma_length = dio->sg[0].length;
++#endif
++ }
++ dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
++ dio->page_cnt, dio->direction);
++ if (dio->sg_cnt <= 0) {
++ BCMLOG_ERR("sg map %d-%d \n", dio->sg_cnt, dio->page_cnt);
++ crystalhd_unmap_dio(adp, dio);
++ return BC_STS_ERROR;
++ }
++ if (dio->sg_cnt && skip_fb_sg)
++ dio->sg_cnt -= 1;
++ dio->sig = crystalhd_dio_sg_mapped;
++ /* Fill in User info.. */
++ dio->uinfo.xfr_len = ubuff_sz;
++ dio->uinfo.xfr_buff = ubuff;
++ dio->uinfo.uv_offset = uv_offset;
++ dio->uinfo.b422mode = en_422mode;
++ dio->uinfo.dir_tx = dir_tx;
++
++ *dio_hnd = dio;
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_unmap_dio - Release mapped resources
++ * @adp: Adapter instance
++ * @dio: DIO request instance
++ *
++ * Return:
++ * Status.
++ *
++ * This routine is to unmap the user buffer pages.
++ */
++BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
++{
++ struct page *page = NULL;
++ int j = 0;
++
++ if (!adp || !dio) {
++ BCMLOG_ERR("Invalid arg \n");
++ return BC_STS_INV_ARG;
++ }
++
++ if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
++ for (j = 0; j < dio->page_cnt; j++) {
++ page = dio->pages[j];
++ if (page) {
++ if (!PageReserved(page) &&
++ (dio->direction == DMA_FROM_DEVICE))
++ SetPageDirty(page);
++ page_cache_release(page);
++ }
++ }
++ }
++ if (dio->sig == crystalhd_dio_sg_mapped)
++ pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);
++
++ crystalhd_free_dio(adp, dio);
++
++ return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
++ * @adp: Adapter instance
++ * @max_pages: Max pages for size calculation.
++ *
++ * Return:
++ * system error.
++ *
++ * This routine creates a memory pool to hold dio contexts for
++ * HW Direct IO operation.
++ */
++int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
++{
++ uint32_t asz = 0, i = 0;
++ uint8_t *temp;
++ crystalhd_dio_req *dio;
++
++ if (!adp || !max_pages) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return -EINVAL;
++ }
++
++ /* Get dma memory for fill byte handling..*/
++ adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte",
++ adp->pdev, 8, 8, 0);
++ if (!adp->fill_byte_pool) {
++ BCMLOG_ERR("failed to create fill byte pool\n");
++ return -ENOMEM;
++ }
++
++ /* Get the max size from user based on 420/422 modes */
++ asz = (sizeof(*dio->pages) * max_pages) +
++ (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
++
++ BCMLOG(BCMLOG_DBG, "Initializing Dio pool %d %d %x %p\n",
++ BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool);
++
++ for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) {
++ temp = (uint8_t *)kzalloc(asz, GFP_KERNEL);
++ if ((temp) == NULL) {
++ BCMLOG_ERR("Failed to alloc %d mem\n", asz);
++ return -ENOMEM;
++ }
++
++ dio = (crystalhd_dio_req *)temp;
++ temp += sizeof(*dio);
++ dio->pages = (struct page **)temp;
++ temp += (sizeof(*dio->pages) * max_pages);
++ dio->sg = (struct scatterlist *)temp;
++ dio->max_pages = max_pages;
++ dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL,
++ &dio->fb_pa);
++ if (!dio->fb_va) {
++ BCMLOG_ERR("fill byte alloc failed.\n");
++ return -ENOMEM;
++ }
++
++ crystalhd_free_dio(adp, dio);
++ }
++
++ return 0;
++}
++
++/**
++ * crystalhd_destroy_dio_pool - Release DIO mem pool.
++ * @adp: Adapter instance
++ *
++ * Return:
++ * none.
++ *
++ * This routine releases dio memory pool during close.
++ */
++void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
++{
++ crystalhd_dio_req *dio;
++ int count = 0;
++
++ if (!adp) {
++ BCMLOG_ERR("Invalid Arg!!\n");
++ return;
++ }
++
++ do {
++ dio = crystalhd_alloc_dio(adp);
++ if (dio) {
++ if (dio->fb_va)
++ pci_pool_free(adp->fill_byte_pool,
++ dio->fb_va, dio->fb_pa);
++ count++;
++ kfree(dio);
++ }
++ } while (dio);
++
++ if (adp->fill_byte_pool) {
++ pci_pool_destroy(adp->fill_byte_pool);
++ adp->fill_byte_pool = NULL;
++ }
++
++ BCMLOG(BCMLOG_DBG, "Released dio pool %d \n", count);
++}
++
++/**
++ * crystalhd_create_elem_pool - List element pool creation.
++ * @adp: Adapter instance
++ * @pool_size: Number of elements in the pool.
++ *
++ * Return:
++ * 0 - success, <0 error
++ *
++ * Create general purpose list element pool to hold pending,
++ * and active requests.
++ */
++int crystalhd_create_elem_pool(struct crystalhd_adp *adp, uint32_t pool_size)
++{
++ uint32_t i;
++ crystalhd_elem_t *temp;
++
++ if (!adp || !pool_size)
++ return -EINVAL;
++
++ for (i = 0; i < pool_size; i++) {
++ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
++ if (!temp) {
++ BCMLOG_ERR("kalloc failed \n");
++ return -ENOMEM;
++ }
++ crystalhd_free_elem(adp, temp);
++ }
++ BCMLOG(BCMLOG_DBG, "allocated %d elem\n", pool_size);
++ return 0;
++}
++
++/**
++ * crystalhd_delete_elem_pool - List element pool deletion.
++ * @adp: Adapter instance
++ *
++ * Return:
++ * none
++ *
++ * Delete general purpose list element pool.
++ */
++void crystalhd_delete_elem_pool(struct crystalhd_adp *adp)
++{
++ crystalhd_elem_t *temp;
++ int dbg_cnt = 0;
++
++ if (!adp)
++ return;
++
++ do {
++ temp = crystalhd_alloc_elem(adp);
++ if (temp) {
++ kfree(temp);
++ dbg_cnt++;
++ }
++ } while (temp);
++
++ BCMLOG(BCMLOG_DBG, "released %d elem\n", dbg_cnt);
++}
++
++/*================ Debug support routines.. ================================*/
++void crystalhd_show_buffer(uint32_t off, uint8_t *buff, uint32_t dwcount)
++{
++ uint32_t i, k = 1;
++
++ for (i = 0; i < dwcount; i++) {
++ if (k == 1)
++ BCMLOG(BCMLOG_DATA, "0x%08X : ", off);
++
++ BCMLOG(BCMLOG_DATA, " 0x%08X ", *((uint32_t *)buff));
++
++ buff += sizeof(uint32_t);
++ off += sizeof(uint32_t);
++ k++;
++ if ((i == dwcount - 1) || (k > 4)) {
++ BCMLOG(BCMLOG_DATA, "\n");
++ k = 1;
++ }
++ }
++}
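Usage sketch for the generic DIO queue API implemented above; the names example_rel_cb, example_rx_path and rx_q are hypothetical, and the 64-byte payload is arbitrary.

/* Illustrative only: example_* names are not part of the driver. */
static void example_rel_cb(void *context, void *data)
{
	kfree(data);				/* free anything still queued at delete time */
}

static BC_STATUS example_rx_path(struct crystalhd_adp *adp)
{
	crystalhd_dioq_t *rx_q = NULL;
	uint32_t sig_pend = 0;
	void *pkt;
	BC_STATUS sts;

	sts = crystalhd_create_dioq(adp, &rx_q, example_rel_cb, NULL);
	if (sts != BC_STS_SUCCESS)
		return sts;

	pkt = kzalloc(64, GFP_KERNEL);		/* arbitrary payload for the sketch */
	if (pkt)
		crystalhd_dioq_add(rx_q, pkt, true, 0x01);	/* wake waiters, tag 0x01 */

	/* Wait up to 2 seconds for the next element; *sig_pend is set on -EINTR. */
	pkt = crystalhd_dioq_fetch_wait(rx_q, 2, &sig_pend);
	kfree(pkt);				/* kfree(NULL) is a no-op */

	crystalhd_delete_dioq(adp, rx_q);	/* drains leftovers via example_rel_cb */
	return BC_STS_SUCCESS;
}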
+diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
+new file mode 100644
+index 0000000..a2aa6ad
+--- /dev/null
++++ b/drivers/staging/crystalhd/crystalhd_misc.h
+@@ -0,0 +1,229 @@
++/***************************************************************************
++ * Copyright (c) 2005-2009, Broadcom Corporation.
++ *
++ * Name: crystalhd_misc.h
++ *
++ * Description:
++ * BCM70012 Linux driver general purpose routines.
++ * Includes reg/mem read and write routines.
++ *
++ * HISTORY:
++ *
++ **********************************************************************
++ * This file is part of the crystalhd device driver.
++ *
++ * This driver is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ * This driver is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
++ **********************************************************************/
++
++#ifndef _CRYSTALHD_MISC_H_
++#define _CRYSTALHD_MISC_H_
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/ioctl.h>
++#include <linux/dma-mapping.h>
++#include <linux/version.h>
++#include <linux/sched.h>
++#include <asm/system.h>
++#include "bc_dts_glob_lnx.h"
++
++/* Global log level variable defined in crystal_misc.c file */
++extern uint32_t g_linklog_level;
++
++/* Global element pool for all Queue management.
++ * TX: Active = BC_TX_LIST_CNT, Free = BC_TX_LIST_CNT.
++ * RX: Free = BC_RX_LIST_CNT, Active = 2
++ * FW-CMD: 4
++ */
++#define BC_LINK_ELEM_POOL_SZ ((BC_TX_LIST_CNT * 2) + BC_RX_LIST_CNT + 2 + 4)
++
++/* Driver's IODATA pool count */
++#define CHD_IODATA_POOL_SZ (BC_IOCTL_DATA_POOL_SIZE * BC_LINK_MAX_OPENS)
++
++/* Scatter Gather memory pool size for Tx and Rx */
++#define BC_LINK_SG_POOL_SZ (BC_TX_LIST_CNT + BC_RX_LIST_CNT)
++
++enum _crystalhd_dio_sig {
++ crystalhd_dio_inv = 0,
++ crystalhd_dio_locked,
++ crystalhd_dio_sg_mapped,
++};
++
++struct crystalhd_dio_user_info {
++ void *xfr_buff;
++ uint32_t xfr_len;
++ uint32_t uv_offset;
++ bool dir_tx;
++
++ uint32_t uv_sg_ix;
++ uint32_t uv_sg_off;
++ int comp_sts;
++ int ev_sts;
++ uint32_t y_done_sz;
++ uint32_t uv_done_sz;
++ uint32_t comp_flags;
++ bool b422mode;
++};
++
++typedef struct _crystalhd_dio_req {
++ uint32_t sig;
++ uint32_t max_pages;
++ struct page **pages;
++ struct scatterlist *sg;
++ int sg_cnt;
++ int page_cnt;
++ int direction;
++ struct crystalhd_dio_user_info uinfo;
++ void *fb_va;
++ uint32_t fb_size;
++ dma_addr_t fb_pa;
++ struct _crystalhd_dio_req *next;
++} crystalhd_dio_req;
++
++#define BC_LINK_DIOQ_SIG (0x09223280)
++
++typedef struct _crystalhd_elem_s {
++ struct _crystalhd_elem_s *flink;
++ struct _crystalhd_elem_s *blink;
++ void *data;
++ uint32_t tag;
++} crystalhd_elem_t;
++
++typedef void (*crystalhd_data_free_cb)(void *context, void *data);
++
++typedef struct _crystalhd_dioq_s {
++ uint32_t sig;
++ struct crystalhd_adp *adp;
++ crystalhd_elem_t *head;
++ crystalhd_elem_t *tail;
++ uint32_t count;
++ spinlock_t lock;
++ wait_queue_head_t event;
++ crystalhd_data_free_cb data_rel_cb;
++ void *cb_context;
++} crystalhd_dioq_t;
++
++typedef void (*hw_comp_callback)(crystalhd_dio_req *,
++ wait_queue_head_t *event, BC_STATUS sts);
++
++/*========= Decoder (7412) register access routines.================= */
++uint32_t bc_dec_reg_rd(struct crystalhd_adp *, uint32_t);
++void bc_dec_reg_wr(struct crystalhd_adp *, uint32_t, uint32_t);
++
++/*========= Link (70012) register access routines.. =================*/
++uint32_t crystalhd_reg_rd(struct crystalhd_adp *, uint32_t);
++void crystalhd_reg_wr(struct crystalhd_adp *, uint32_t, uint32_t);
++
++/*========= Decoder (7412) memory access routines..=================*/
++BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
++BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
++
++/*==========Link (70012) PCIe Config access routines.================*/
++BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
++BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t);
++
++/*========= Linux Kernel Interface routines. ======================= */
++void *bc_kern_dma_alloc(struct crystalhd_adp *, uint32_t, dma_addr_t *);
++void bc_kern_dma_free(struct crystalhd_adp *, uint32_t,
++ void *, dma_addr_t);
++#define crystalhd_create_event(_ev) init_waitqueue_head(_ev)
++#define crystalhd_set_event(_ev) wake_up_interruptible(_ev)
++#define crystalhd_wait_on_event(ev, condition, timeout, ret, nosig) \
++do { \
++ DECLARE_WAITQUEUE(entry, current); \
++ unsigned long end = jiffies + ((timeout * HZ) / 1000); \
++ ret = 0; \
++ add_wait_queue(ev, &entry); \
++ for (;;) { \
++ __set_current_state(TASK_INTERRUPTIBLE); \
++ if (condition) { \
++ break; \
++ } \
++ if (time_after_eq(jiffies, end)) { \
++ ret = -EBUSY; \
++ break; \
++ } \
++ schedule_timeout((HZ / 100 > 1) ? HZ / 100 : 1); \
++ if (!nosig && signal_pending(current)) { \
++ ret = -EINTR; \
++ break; \
++ } \
++ } \
++ __set_current_state(TASK_RUNNING); \
++ remove_wait_queue(ev, &entry); \
++} while (0)
++
++/*================ Direct IO mapping routines ==================*/
++extern int crystalhd_create_dio_pool(struct crystalhd_adp *, uint32_t);
++extern void crystalhd_destroy_dio_pool(struct crystalhd_adp *);
++extern BC_STATUS crystalhd_map_dio(struct crystalhd_adp *, void *, uint32_t,
++ uint32_t, bool, bool, crystalhd_dio_req**);
++
++extern BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *, crystalhd_dio_req*);
++#define crystalhd_get_sgle_paddr(_dio, _ix) (cpu_to_le64(sg_dma_address(&_dio->sg[_ix])))
++#define crystalhd_get_sgle_len(_dio, _ix) (cpu_to_le32(sg_dma_len(&_dio->sg[_ix])))
++
++/*================ General Purpose Queues ==================*/
++extern BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *, crystalhd_dioq_t **, crystalhd_data_free_cb , void *);
++extern void crystalhd_delete_dioq(struct crystalhd_adp *, crystalhd_dioq_t *);
++extern BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data, bool wake, uint32_t tag);
++extern void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq);
++extern void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag);
++extern void *crystalhd_dioq_fetch_wait(crystalhd_dioq_t *ioq, uint32_t to_secs, uint32_t *sig_pend);
++
++#define crystalhd_dioq_count(_ioq) ((_ioq) ? _ioq->count : 0)
++
++extern int crystalhd_create_elem_pool(struct crystalhd_adp *, uint32_t);
++extern void crystalhd_delete_elem_pool(struct crystalhd_adp *);
++
++
++/*================ Debug routines/macros .. ================================*/
++extern void crystalhd_show_buffer(uint32_t off, uint8_t *buff, uint32_t dwcount);
++
++enum _chd_log_levels {
++ BCMLOG_ERROR = 0x80000000, /* Don't disable this option */
++ BCMLOG_DATA = 0x40000000, /* Data, enable by default */
++ BCMLOG_SPINLOCK = 0x20000000, /* Special case for spin locks */
++
++ /* Following are allowed only in debug mode */
++ BCMLOG_INFO = 0x00000001, /* Generic informational */
++ BCMLOG_DBG = 0x00000002, /* First level Debug info */
++ BCMLOG_SSTEP = 0x00000004, /* Stepping information */
++ BCMLOG_ENTER_LEAVE = 0x00000008, /* stack tracking */
++};
++
++#define BCMLOG_ENTER \
++if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
++ printk("Entered %s\n", __func__); \
++}
++
++#define BCMLOG_LEAVE \
++if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \
++ printk("Leaving %s\n", __func__); \
++}
++
++#define BCMLOG(trace, fmt, args...) \
++if (g_linklog_level & trace) { \
++ printk(fmt, ##args); \
++}
++
++#define BCMLOG_ERR(fmt, args...) \
++do { \
++ if (g_linklog_level & BCMLOG_ERROR) { \
++ printk("*ERR*:%s:%d: "fmt, __FILE__, __LINE__, ##args); \
++ } \
++} while (0)
++
++#endif
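Usage sketch for the register accessors and log macros declared above; EXAMPLE_REG_OFF and example_set_bits() are made-up names, and the offset is only assumed to lie inside the mapped BAR-1 window.

/* Illustrative only: EXAMPLE_REG_OFF and example_set_bits() are made-up names. */
#define EXAMPLE_REG_OFF	0x0100		/* assumed to fall inside the 64K BAR-1 window */

static void example_set_bits(struct crystalhd_adp *adp, uint32_t bits)
{
	uint32_t val = crystalhd_reg_rd(adp, EXAMPLE_REG_OFF);

	crystalhd_reg_wr(adp, EXAMPLE_REG_OFF, val | bits);
	BCMLOG(BCMLOG_DBG, "reg 0x%04x: 0x%08x -> 0x%08x\n",
	       EXAMPLE_REG_OFF, val, val | bits);
}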
diff --git a/freed-ora/current/F-12/die-floppy-die.patch b/freed-ora/current/F-12/die-floppy-die.patch
new file mode 100644
index 000000000..26beabfc7
--- /dev/null
+++ b/freed-ora/current/F-12/die-floppy-die.patch
@@ -0,0 +1,18 @@
+Kill the floppy.ko pnp modalias. We were surviving just fine without
+autoloading floppy drivers, tyvm.
+
+Please feel free to register all complaints in the wastepaper bin.
+
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 91b7530..2ea84a6 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4631,7 +4631,7 @@ static const struct pnp_device_id floppy_pnpids[] = {
+ { "PNP0700", 0 },
+ { }
+ };
+-MODULE_DEVICE_TABLE(pnp, floppy_pnpids);
++/* MODULE_DEVICE_TABLE(pnp, floppy_pnpids); */
+
+ #else
+
diff --git a/freed-ora/current/F-12/drm-i915-add-reclaimable-to-page-allocations.patch b/freed-ora/current/F-12/drm-i915-add-reclaimable-to-page-allocations.patch
new file mode 100644
index 000000000..6014f2c15
--- /dev/null
+++ b/freed-ora/current/F-12/drm-i915-add-reclaimable-to-page-allocations.patch
@@ -0,0 +1,48 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 18 Jul 2010 16:44:37 +0000 (-0700)
+Subject: drm/i915: add 'reclaimable' to i915 self-reclaimable page allocations
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=cd9f040df6ce46573760a507cb88192d05d27d86
+
+drm/i915: add 'reclaimable' to i915 self-reclaimable page allocations
+
+The hibernate issues that got fixed in commit 985b823b9192 ("drm/i915:
+fix hibernation since i915 self-reclaim fixes") turn out to have been
+incomplete. Vefa Bicakci tested lots of hibernate cycles, and without
+the __GFP_RECLAIMABLE flag the system eventually fails to resume.
+
+With the flag added, Vefa can apparently hibernate forever (or until he
+gets bored running his automated scripts, whichever comes first).
+
+The reclaimable flag was there originally, and was one of the flags that
+were dropped (unintentionally) by commit 4bdadb978569 ("drm/i915:
+Selectively enable self-reclaim") that introduced all these problems,
+but I didn't want to just blindly add back all the flags in commit
+985b823b9192, and it looked like __GFP_RECLAIM wasn't necessary. It
+clearly was.
+
+I still suspect that there is some subtle reason we're missing that
+causes the problems, but __GFP_RECLAIMABLE is certainly not wrong to use
+in this context, and is what the code historically used. And we have no
+idea what the causes the corruption without it.
+
+Reported-and-tested-by: M. Vefa Bicakci <bicave@superonline.com>
+Cc: Dave Airlie <airlied@gmail.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 0743858..8757ecf 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER |
+ __GFP_COLD |
++ __GFP_RECLAIMABLE |
+ gfpmask);
+ if (IS_ERR(page))
+ goto err_pages;
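
Taken together with the companion hibernate fix further down (drm-i915-fix-hibernate-memory-corruption.patch), the hunk above leaves the allocation passing the flag set sketched below. This is a condensed illustration of the final gfp composition, not a verbatim copy of i915_gem_object_get_pages():

/* Sketch: page-cache read with the flags the two patches converge on. */
gfp_t gfp = GFP_HIGHUSER |        /* user-style allocation, highmem allowed */
	    __GFP_COLD |          /* cache-cold pages are acceptable        */
	    __GFP_RECLAIMABLE |   /* group with reclaimable pages (the fix) */
	    gfpmask;              /* caller-supplied extras for this object */
page = read_cache_page_gfp(mapping, i, gfp);
if (IS_ERR(page))
	return PTR_ERR(page);
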
diff --git a/freed-ora/current/F-12/drm-i915-fix-hibernate-memory-corruption.patch b/freed-ora/current/F-12/drm-i915-fix-hibernate-memory-corruption.patch
new file mode 100644
index 000000000..a9c2c18eb
--- /dev/null
+++ b/freed-ora/current/F-12/drm-i915-fix-hibernate-memory-corruption.patch
@@ -0,0 +1,41 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 2 Jul 2010 00:04:42 +0000 (+1000)
+Subject: drm/i915: fix hibernation since i915 self-reclaim fixes
+X-Git-Tag: v2.6.35-rc4~13
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=985b823b919273fe1327d56d2196b4f92e5d0fae
+
+drm/i915: fix hibernation since i915 self-reclaim fixes
+
+Since commit 4bdadb9785696439c6e2b3efe34aa76df1149c83 ("drm/i915:
+Selectively enable self-reclaim"), we've been passing GFP_MOVABLE to the
+i915 page allocator where we weren't before due to some over-eager
+removal of the page mapping gfp_flags games the code used to play.
+
+This caused hibernate on Intel hardware to result in a lot of memory
+corruptions on resume. See for example
+
+ http://bugzilla.kernel.org/show_bug.cgi?id=13811
+
+Reported-by: Evengi Golov (in bugzilla)
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Tested-by: M. Vefa Bicakci <bicave@superonline.com>
+Cc: stable@kernel.org
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 9ded3da..0743858 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2239,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
+ mapping = inode->i_mapping;
+ for (i = 0; i < page_count; i++) {
+ page = read_cache_page_gfp(mapping, i,
+- mapping_gfp_mask (mapping) |
++ GFP_HIGHUSER |
+ __GFP_COLD |
+ gfpmask);
+ if (IS_ERR(page))
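
The reason mapping_gfp_mask() was problematic here is that shmem-backed mappings default to a movable allocation mask, so GEM pages quietly picked up __GFP_MOVABLE, which the commit identifies as the trigger for the resume corruption. Roughly, the relevant definitions from include/linux/gfp.h of this era (abridged sketch, not the patch):

/* Flag relationships, abridged: */
#define GFP_HIGHUSER         (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
/* shmem inodes set their mapping gfp mask to GFP_HIGHUSER_MOVABLE, so
 * mapping_gfp_mask(mapping) handed GEM movable pages; switching to plain
 * GFP_HIGHUSER drops that bit, and the reclaimable patch above adds
 * __GFP_RECLAIMABLE back on top. */
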
diff --git a/freed-ora/current/F-12/drm-i915-resume-force-mode.patch b/freed-ora/current/F-12/drm-i915-resume-force-mode.patch
new file mode 100644
index 000000000..3e6c6484b
--- /dev/null
+++ b/freed-ora/current/F-12/drm-i915-resume-force-mode.patch
@@ -0,0 +1,50 @@
+http://lists.freedesktop.org/archives/intel-gfx/2009-February/001313.html
+
+--- a/drivers/gpu/drm/i915/i915_suspend.c.orig 2009-02-18 22:59:19.000000000 -0500
++++ b/drivers/gpu/drm/i915/i915_suspend.c 2009-02-18 22:59:58.000000000 -0500
+@@ -28,6 +28,7 @@
+ #include "drm.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
++#include <drm/drm_crtc_helper.h>
+
+ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+ {
+@@ -519,6 +520,8 @@
+
+ i915_restore_vga(dev);
+
++ drm_helper_resume_force_mode(dev);
++
+ return 0;
+ }
+
+From f5192bce8be69e5b33d7579bc282fef4d673e2c1 Mon Sep 17 00:00:00 2001
+From: Lubomir Rintel <lkundrak@v3.sk>
+Date: Sun, 15 Mar 2009 13:55:55 +0100
+Subject: [PATCH] Fix i915 nomodeset NULL deref. during PM resume
+
+drm_helper_resume_force_mode() would crash while attempting to
+iterate through crtc_list, which is uninitialized when modesetting is
+disabled.
+---
+ drivers/gpu/drm/i915/i915_suspend.c | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index ef5fb6e..b138032 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -520,7 +520,8 @@ int i915_restore_state(struct drm_device *dev)
+
+ i915_restore_vga(dev);
+
+- drm_helper_resume_force_mode(dev);
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ drm_helper_resume_force_mode(dev);
+
+ return 0;
+ }
+--
+1.6.2
+
diff --git a/freed-ora/current/F-12/drm-intel-945gm-stability-fixes.patch b/freed-ora/current/F-12/drm-intel-945gm-stability-fixes.patch
new file mode 100644
index 000000000..ff661cf0a
--- /dev/null
+++ b/freed-ora/current/F-12/drm-intel-945gm-stability-fixes.patch
@@ -0,0 +1,117 @@
+upstream commit 944001201ca0196bcdb088129e5866a9f379d08c
+(plus some defines)
+[2.6.32 backport]
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 0d05c6f..b87f65d 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -4967,6 +4967,16 @@ i915_gem_load(struct drm_device *dev)
+ list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ spin_unlock(&shrink_list_lock);
+
++ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
++ if (IS_GEN3(dev)) {
++ u32 tmp = I915_READ(MI_ARB_STATE);
++ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
++ /* arb state is a masked write, so set bit + bit in mask */
++ tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
++ I915_WRITE(MI_ARB_STATE, tmp);
++ }
++ }
++
+ /* Old X drivers will take 0-2 for front, back, depth buffers */
+ dev_priv->fence_reg_start = 3;
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4cbc521..4543975 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -357,6 +357,70 @@
+ #define LM_BURST_LENGTH 0x00000700
+ #define LM_FIFO_WATERMARK 0x0000001F
+ #define MI_ARB_STATE 0x020e4 /* 915+ only */
++#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
++
++/* Make render/texture TLB fetches lower priority than associated data
++ * fetches. This is not turned on by default.
++ */
++#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
++
++/* Isoch request wait on GTT enable (Display A/B/C streams).
++ * Make isoch requests stall on the TLB update. May cause
++ * display underruns (test mode only)
++ */
++#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
++
++/* Block grant count for isoch requests when block count is
++ * set to a finite value.
++ */
++#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
++#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
++#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
++#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
++#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
++
++/* Enable render writes to complete in C2/C3/C4 power states.
++ * If this isn't enabled, render writes are prevented in low
++ * power states. That seems bad to me.
++ */
++#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
++
++/* This acknowledges an async flip immediately instead
++ * of waiting for 2TLB fetches.
++ */
++#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
++
++/* Enables non-sequential data reads through arbiter
++ */
++#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
++
++/* Disable FSB snooping of cacheable write cycles from binner/render
++ * command stream
++ */
++#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
++
++/* Arbiter time slice for non-isoch streams */
++#define MI_ARB_TIME_SLICE_MASK (7 << 5)
++#define MI_ARB_TIME_SLICE_1 (0 << 5)
++#define MI_ARB_TIME_SLICE_2 (1 << 5)
++#define MI_ARB_TIME_SLICE_4 (2 << 5)
++#define MI_ARB_TIME_SLICE_6 (3 << 5)
++#define MI_ARB_TIME_SLICE_8 (4 << 5)
++#define MI_ARB_TIME_SLICE_10 (5 << 5)
++#define MI_ARB_TIME_SLICE_14 (6 << 5)
++#define MI_ARB_TIME_SLICE_16 (7 << 5)
++
++/* Low priority grace period page size */
++#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
++#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
++
++/* Disable display A/B trickle feed */
++#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
++
++/* Set display plane priority */
++#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
++#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
++
+ #define CACHE_MODE_0 0x02120 /* 915+ only */
+ #define CM0_MASK_SHIFT 16
+ #define CM0_IZ_OPT_DISABLE (1<<6)
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1045,6 +1045,13 @@ extern int i915_wait_ring(struct drm_dev
+ #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+ #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
++#define IS_GEN3(dev) (IS_I915G(dev) || \
++ IS_I915GM(dev) || \
++ IS_I945G(dev) || \
++ IS_I945GM(dev) || \
++ IS_G33(dev) || \
++ IS_PINEVIEW(dev))
++
+ #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+ /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+--
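
The MI_ARB_STATE write in the first hunk uses the "masked register" idiom common to these Intel registers: the upper 16 bits are per-bit write enables for the lower 16, so a single bit can be changed without disturbing the rest of the arbiter state. A small hypothetical helper makes the shape of it clearer (masked_bit_enable() is illustrative only, not something the patch adds):

/* Illustrative helper: bits 31:16 act as a write-enable mask for bits 15:0,
 * so only the named bit changes and all other arbiter settings are kept. */
static inline u32 masked_bit_enable(u32 bit)
{
	return bit | (bit << MI_ARB_MASK_SHIFT);
}

/* e.g. I915_WRITE(MI_ARB_STATE, masked_bit_enable(MI_ARB_C3_LP_WRITE_ENABLE)); */
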
diff --git a/freed-ora/current/F-12/drm-intel-acpi-populate-didl.patch b/freed-ora/current/F-12/drm-intel-acpi-populate-didl.patch
new file mode 100644
index 000000000..069f2769a
--- /dev/null
+++ b/freed-ora/current/F-12/drm-intel-acpi-populate-didl.patch
@@ -0,0 +1,70 @@
+diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/i915_opregion.c.orig linux-2.6.33.noarch/drivers/gpu/drm/i915/i915_opregion.c
+--- linux-2.6.33.noarch/drivers/gpu/drm/i915/i915_opregion.c.orig 2010-02-24 13:52:17.000000000 -0500
++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/i915_opregion.c 2010-04-01 10:35:35.249121262 -0400
+@@ -382,8 +382,54 @@ static void intel_didl_outputs(struct dr
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ struct drm_connector *connector;
++ acpi_handle handle;
++ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
++ unsigned long long device_id;
++ acpi_status status;
+ int i = 0;
+
++ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
++ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
++ return;
++
++ if (acpi_is_video_device(acpi_dev))
++ acpi_video_bus = acpi_dev;
++ else {
++ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
++ if (acpi_is_video_device(acpi_cdev)) {
++ acpi_video_bus = acpi_cdev;
++ break;
++ }
++ }
++ }
++
++ if (!acpi_video_bus)
++ goto blind_set;
++
++ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
++ if (i >= 8) {
++ dev_printk (KERN_ERR, &dev->pdev->dev,
++ "More than 8 outputs detected\n");
++ return;
++ }
++ status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
++ NULL, &device_id);
++ if (ACPI_SUCCESS(status)) {
++ if (!device_id)
++ goto blind_set;
++ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
++ i++;
++ }
++ }
++
++end:
++ /* If fewer than 8 outputs, the list must be null terminated */
++ if (i < 8)
++ opregion->acpi->didl[i] = 0;
++ return;
++
++blind_set:
++ i = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ int output_type = ACPI_OTHER_OUTPUT;
+ if (i >= 8) {
+@@ -416,10 +462,7 @@ static void intel_didl_outputs(struct dr
+ opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ i++;
+ }
+-
+- /* If fewer than 8 outputs, the list must be null terminated */
+- if (i < 8)
+- opregion->acpi->didl[i] = 0;
++ goto end;
+ }
+
+ int intel_opregion_init(struct drm_device *dev, int resume)
diff --git a/freed-ora/current/F-12/drm-intel-big-hammer.patch b/freed-ora/current/F-12/drm-intel-big-hammer.patch
new file mode 100644
index 000000000..e7047508d
--- /dev/null
+++ b/freed-ora/current/F-12/drm-intel-big-hammer.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 37427e4..08af9db 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2553,6 +2553,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+
+ mutex_lock(&dev->struct_mutex);
+
++ /* We don't get the flushing right for these chipsets, use the
++ * big hammer for now to avoid random crashiness. */
++ if (IS_I855(dev) || IS_I865G(dev))
++ wbinvd();
++
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (dev_priv->mm.wedged) {
diff --git a/freed-ora/current/F-12/drm-intel-make-lvds-work.patch b/freed-ora/current/F-12/drm-intel-make-lvds-work.patch
new file mode 100644
index 000000000..af3fd8f9f
--- /dev/null
+++ b/freed-ora/current/F-12/drm-intel-make-lvds-work.patch
@@ -0,0 +1,19 @@
+diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.old linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c
+--- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.old 2010-03-31 15:51:11.798876290 -0400
++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c 2010-03-31 16:01:18.342747791 -0400
+@@ -3742,7 +3742,6 @@ struct drm_crtc *intel_get_load_detect_p
+ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+ {
+ struct drm_encoder *encoder = &intel_output->enc;
+- struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+@@ -3752,7 +3751,6 @@ void intel_release_load_detect_pipe(stru
+ intel_output->base.encoder = NULL;
+ intel_output->load_detect_temp = false;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+- drm_helper_disable_unused_functions(dev);
+ }
+
+ /* Switch crtc and output back off if necessary */
diff --git a/freed-ora/current/F-12/drm-intel-next.patch b/freed-ora/current/F-12/drm-intel-next.patch
new file mode 100644
index 000000000..c6cac6926
--- /dev/null
+++ b/freed-ora/current/F-12/drm-intel-next.patch
@@ -0,0 +1 @@
+empty
diff --git a/freed-ora/current/F-12/drm-intel-no-tv-hotplug.patch b/freed-ora/current/F-12/drm-intel-no-tv-hotplug.patch
new file mode 100644
index 000000000..9de59b374
--- /dev/null
+++ b/freed-ora/current/F-12/drm-intel-no-tv-hotplug.patch
@@ -0,0 +1,11 @@
+diff -up linux-2.6.31.noarch/drivers/gpu/drm/i915/i915_reg.h.jx linux-2.6.31.noarch/drivers/gpu/drm/i915/i915_reg.h
+--- linux-2.6.31.noarch/drivers/gpu/drm/i915/i915_reg.h.jx 2009-09-16 13:36:20.000000000 -0400
++++ linux-2.6.31.noarch/drivers/gpu/drm/i915/i915_reg.h 2009-09-16 13:40:32.000000000 -0400
+@@ -836,7 +836,6 @@
+ HDMID_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+- TV_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
+
+
diff --git a/freed-ora/current/F-12/drm-next.patch b/freed-ora/current/F-12/drm-next.patch
deleted file mode 100644
index 96152e045..000000000
--- a/freed-ora/current/F-12/drm-next.patch
+++ /dev/null
@@ -1,30739 +0,0 @@
-Deblobbed:
-
-* drivers/gpu/drm/mga/mga_warp.c: Clean-up non-Free firmware names.
-* drivers/gpu/drm/r128/r128_cce.c: Likewise. Adjust for prior deblobbing.
-* drivers/gpu/drm/radeon/r100.c: Likewise.
-* drivers/gpu/drm/radeon/r600.c: Likewise.
-* drivers/gpu/drm/radeon/radeon_cp.c: Likewise.
-* drivers/gpu/drm/radeon/r600_cp.c: Likewise.
-* drivers/gpu/drm/mga/mga_ucode.h: Adjust for prior deblobbing.
-* drivers/gpu/drm/radeon/radeon_microcode.h: Likewise.
-* drivers/gpu/drm/radeon/r600_microcode.h: Likewise.
-* firmware/*: Deblobbed.
-* drivers/gpu/drm/Kconfig: Reenable drivers that gained load error recovery.
-
-diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
-index 39b393d..e4d971c 100644
---- a/drivers/gpu/drm/Kconfig
-+++ b/drivers/gpu/drm/Kconfig
-@@ -18,6 +18,14 @@ menuconfig DRM
- details. You should also select and configure AGP
- (/dev/agpgart) support.
-
-+config DRM_KMS_HELPER
-+ tristate
-+ depends on DRM
-+ select FB
-+ select FRAMEBUFFER_CONSOLE if !EMBEDDED
-+ help
-+ FB and CRTC helpers for KMS drivers.
-+
- config DRM_TTM
- tristate
- depends on DRM
-@@ -36,9 +44,9 @@ config DRM_TDFX
- graphics card. If M is selected, the module will be called tdfx.
-
- config DRM_R128
--depends on NONFREE
- tristate "ATI Rage 128"
- depends on DRM && PCI
-+ select FW_LOADER
- help
- Choose this option if you have an ATI Rage 128 graphics card. If M
- is selected, the module will be called r128. AGP support for
-@@ -47,14 +56,14 @@ config DRM_RADEON
- this card is strongly suggested (unless you have a PCI version).
-
- config DRM_RADEON
--depends on NONFREE
- tristate "ATI Radeon"
- depends on DRM && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
-- select FB
-- select FRAMEBUFFER_CONSOLE if !EMBEDDED
-+ select FW_LOADER
-+ select DRM_KMS_HELPER
-+ select DRM_TTM
- help
- Choose this option if you have an ATI Radeon graphics card. There
- are both PCI and AGP versions. You don't need to choose this to
-@@ -82,11 +92,10 @@ config DRM_I830
- config DRM_I915
- tristate "i915 driver"
- depends on AGP_INTEL
-+ select DRM_KMS_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
-- select FB
-- select FRAMEBUFFER_CONSOLE if !EMBEDDED
- # i915 depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select VIDEO_OUTPUT_CONTROL if ACPI
-@@ -116,9 +125,9 @@ endchoice
- endchoice
-
- config DRM_MGA
--depends on NONFREE
- tristate "Matrox g200/g400"
- depends on DRM
-+ select FW_LOADER
- help
- Choose this option if you have a Matrox G200, G400 or G450 graphics
- card. If M is selected, the module will be called mga. AGP
-diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
-index fe23f29..3c8827a 100644
---- a/drivers/gpu/drm/Makefile
-+++ b/drivers/gpu/drm/Makefile
-@@ -10,11 +10,15 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
- drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
-- drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
-- drm_info.o drm_debugfs.o
-+ drm_crtc.o drm_modes.o drm_edid.o \
-+ drm_info.o drm_debugfs.o drm_encoder_slave.o
-
- drm-$(CONFIG_COMPAT) += drm_ioc32.o
-
-+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
-+
-+obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
-+
- obj-$(CONFIG_DRM) += drm.o
- obj-$(CONFIG_DRM_TTM) += ttm/
- obj-$(CONFIG_DRM_TDFX) += tdfx/
-diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
-index 0e994a0..0e3bd5b 100644
---- a/drivers/gpu/drm/drm_cache.c
-+++ b/drivers/gpu/drm/drm_cache.c
-@@ -45,6 +45,23 @@ drm_clflush_page(struct page *page)
- clflush(page_virtual + i);
- kunmap_atomic(page_virtual, KM_USER0);
- }
-+
-+static void drm_cache_flush_clflush(struct page *pages[],
-+ unsigned long num_pages)
-+{
-+ unsigned long i;
-+
-+ mb();
-+ for (i = 0; i < num_pages; i++)
-+ drm_clflush_page(*pages++);
-+ mb();
-+}
-+
-+static void
-+drm_clflush_ipi_handler(void *null)
-+{
-+ wbinvd();
-+}
- #endif
-
- void
-@@ -53,17 +70,30 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
-
- #if defined(CONFIG_X86)
- if (cpu_has_clflush) {
-- unsigned long i;
--
-- mb();
-- for (i = 0; i < num_pages; ++i)
-- drm_clflush_page(*pages++);
-- mb();
--
-+ drm_cache_flush_clflush(pages, num_pages);
- return;
- }
-
-- wbinvd();
-+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
-+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
-+
-+#elif defined(__powerpc__)
-+ unsigned long i;
-+ for (i = 0; i < num_pages; i++) {
-+ struct page *page = pages[i];
-+ void *page_virtual;
-+
-+ if (unlikely(page == NULL))
-+ continue;
-+
-+ page_virtual = kmap_atomic(page, KM_USER0);
-+ flush_dcache_range((unsigned long)page_virtual,
-+ (unsigned long)page_virtual + PAGE_SIZE);
-+ kunmap_atomic(page_virtual, KM_USER0);
-+ }
-+#else
-+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
-+ WARN_ON_ONCE(1);
- #endif
- }
- EXPORT_SYMBOL(drm_clflush_pages);
-diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
-index 2f631c7..ba728ad 100644
---- a/drivers/gpu/drm/drm_crtc.c
-+++ b/drivers/gpu/drm/drm_crtc.c
-@@ -68,10 +68,10 @@ DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
- */
- static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
- {
-- { DRM_MODE_SCALE_NON_GPU, "Non-GPU" },
-- { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" },
-- { DRM_MODE_SCALE_NO_SCALE, "No scale" },
-- { DRM_MODE_SCALE_ASPECT, "Aspect" },
-+ { DRM_MODE_SCALE_NONE, "None" },
-+ { DRM_MODE_SCALE_FULLSCREEN, "Full" },
-+ { DRM_MODE_SCALE_CENTER, "Center" },
-+ { DRM_MODE_SCALE_ASPECT, "Full aspect" },
- };
-
- static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-@@ -108,6 +108,7 @@ static struct drm_prop_enum_list drm_tv_select_enum_list[] =
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
- };
-
- DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-@@ -118,6 +119,7 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
- };
-
- DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
-@@ -146,6 +148,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
- { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
-+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
- };
-
- static struct drm_prop_enum_list drm_encoder_enum_list[] =
-@@ -165,6 +168,7 @@ char *drm_get_encoder_name(struct drm_encoder *encoder)
- encoder->base.id);
- return buf;
- }
-+EXPORT_SYMBOL(drm_get_encoder_name);
-
- char *drm_get_connector_name(struct drm_connector *connector)
- {
-@@ -699,6 +703,42 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
- drm_property_add_enum(dev->mode_config.tv_mode_property, i,
- i, modes[i]);
-
-+ dev->mode_config.tv_brightness_property =
-+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
-+ "brightness", 2);
-+ dev->mode_config.tv_brightness_property->values[0] = 0;
-+ dev->mode_config.tv_brightness_property->values[1] = 100;
-+
-+ dev->mode_config.tv_contrast_property =
-+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
-+ "contrast", 2);
-+ dev->mode_config.tv_contrast_property->values[0] = 0;
-+ dev->mode_config.tv_contrast_property->values[1] = 100;
-+
-+ dev->mode_config.tv_flicker_reduction_property =
-+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
-+ "flicker reduction", 2);
-+ dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
-+ dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
-+
-+ dev->mode_config.tv_overscan_property =
-+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
-+ "overscan", 2);
-+ dev->mode_config.tv_overscan_property->values[0] = 0;
-+ dev->mode_config.tv_overscan_property->values[1] = 100;
-+
-+ dev->mode_config.tv_saturation_property =
-+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
-+ "saturation", 2);
-+ dev->mode_config.tv_saturation_property->values[0] = 0;
-+ dev->mode_config.tv_saturation_property->values[1] = 100;
-+
-+ dev->mode_config.tv_hue_property =
-+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
-+ "hue", 2);
-+ dev->mode_config.tv_hue_property->values[0] = 0;
-+ dev->mode_config.tv_hue_property->values[1] = 100;
-+
- return 0;
- }
- EXPORT_SYMBOL(drm_mode_create_tv_properties);
-@@ -1044,7 +1084,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
- if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- head) {
-- DRM_DEBUG("CRTC ID is %d\n", crtc->base.id);
-+ DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
-@@ -1072,7 +1112,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- head) {
-- DRM_DEBUG("ENCODER ID is %d\n",
-+ DRM_DEBUG_KMS("ENCODER ID is %d\n",
- encoder->base.id);
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
-@@ -1103,7 +1143,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
-- DRM_DEBUG("CONNECTOR ID is %d\n",
-+ DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
- connector->base.id);
- if (put_user(connector->base.id,
- connector_id + copied)) {
-@@ -1127,7 +1167,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
- }
- card_res->count_connectors = connector_count;
-
-- DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
-+ DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
- card_res->count_connectors, card_res->count_encoders);
-
- out:
-@@ -1230,7 +1270,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
-
- memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
-- DRM_DEBUG("connector id %d:\n", out_resp->connector_id);
-+ DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
-
- mutex_lock(&dev->mode_config.mutex);
-
-@@ -1406,7 +1446,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
- obj = drm_mode_object_find(dev, crtc_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
-- DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
-@@ -1419,7 +1459,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
- list_for_each_entry(crtcfb,
- &dev->mode_config.crtc_list, head) {
- if (crtcfb == crtc) {
-- DRM_DEBUG("Using current fb for setmode\n");
-+ DRM_DEBUG_KMS("Using current fb for "
-+ "setmode\n");
- fb = crtc->fb;
- }
- }
-@@ -1427,7 +1468,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
- obj = drm_mode_object_find(dev, crtc_req->fb_id,
- DRM_MODE_OBJECT_FB);
- if (!obj) {
-- DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
-+ DRM_DEBUG_KMS("Unknown FB ID%d\n",
-+ crtc_req->fb_id);
- ret = -EINVAL;
- goto out;
- }
-@@ -1440,13 +1482,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
- }
-
- if (crtc_req->count_connectors == 0 && mode) {
-- DRM_DEBUG("Count connectors is 0 but mode set\n");
-+ DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
-- DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
-+ DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
- crtc_req->count_connectors);
- ret = -EINVAL;
- goto out;
-@@ -1479,7 +1521,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
- obj = drm_mode_object_find(dev, out_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
-- DRM_DEBUG("Connector id %d unknown\n", out_id);
-+ DRM_DEBUG_KMS("Connector id %d unknown\n",
-+ out_id);
- ret = -EINVAL;
- goto out;
- }
-@@ -1512,7 +1555,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
- struct drm_crtc *crtc;
- int ret = 0;
-
-- DRM_DEBUG("\n");
-+ DRM_DEBUG_KMS("\n");
-
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
-@@ -1522,7 +1565,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
- mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
-- DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id);
-+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- ret = -EINVAL;
- goto out;
- }
-diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
-index 6aaa2cb..ff447f1 100644
---- a/drivers/gpu/drm/drm_crtc_helper.c
-+++ b/drivers/gpu/drm/drm_crtc_helper.c
-@@ -33,15 +33,6 @@
- #include "drm_crtc.h"
- #include "drm_crtc_helper.h"
-
--/*
-- * Detailed mode info for 800x600@60Hz
-- */
--static struct drm_display_mode std_modes[] = {
-- { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840,
-- 968, 1056, 0, 600, 601, 605, 628, 0,
-- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
--};
--
- static void drm_mode_validate_flag(struct drm_connector *connector,
- int flags)
- {
-@@ -94,7 +85,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
- int count = 0;
- int mode_flags = 0;
-
-- DRM_DEBUG("%s\n", drm_get_connector_name(connector));
-+ DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
- /* set all modes to the unverified state */
- list_for_each_entry_safe(mode, t, &connector->modes, head)
- mode->status = MODE_UNVERIFIED;
-@@ -102,15 +93,17 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
- connector->status = connector->funcs->detect(connector);
-
- if (connector->status == connector_status_disconnected) {
-- DRM_DEBUG("%s is disconnected\n",
-+ DRM_DEBUG_KMS("%s is disconnected\n",
- drm_get_connector_name(connector));
-- /* TODO set EDID to NULL */
-- return 0;
-+ goto prune;
- }
-
- count = (*connector_funcs->get_modes)(connector);
-- if (!count)
-- return 0;
-+ if (!count) {
-+ count = drm_add_modes_noedid(connector, 800, 600);
-+ if (!count)
-+ return 0;
-+ }
-
- drm_mode_connector_list_update(connector);
-
-@@ -130,7 +123,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
- mode);
- }
-
--
-+prune:
- drm_mode_prune_invalid(dev, &connector->modes, true);
-
- if (list_empty(&connector->modes))
-@@ -138,7 +131,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
-
- drm_mode_sort(&connector->modes);
-
-- DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector));
-+ DRM_DEBUG_KMS("Probed modes for %s\n",
-+ drm_get_connector_name(connector));
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
-
-@@ -165,39 +159,6 @@ int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
- }
- EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
--static void drm_helper_add_std_modes(struct drm_device *dev,
-- struct drm_connector *connector)
--{
-- struct drm_display_mode *mode, *t;
-- int i;
--
-- for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
-- struct drm_display_mode *stdmode;
--
-- /*
-- * When no valid EDID modes are available we end up
-- * here and bailed in the past, now we add some standard
-- * modes and move on.
-- */
-- stdmode = drm_mode_duplicate(dev, &std_modes[i]);
-- drm_mode_probed_add(connector, stdmode);
-- drm_mode_list_concat(&connector->probed_modes,
-- &connector->modes);
--
-- DRM_DEBUG("Adding mode %s to %s\n", stdmode->name,
-- drm_get_connector_name(connector));
-- }
-- drm_mode_sort(&connector->modes);
--
-- DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector));
-- list_for_each_entry_safe(mode, t, &connector->modes, head) {
-- mode->vrefresh = drm_mode_vrefresh(mode);
--
-- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-- drm_mode_debug_printmodeline(mode);
-- }
--}
--
- /**
- * drm_helper_encoder_in_use - check if a given encoder is in use
- * @encoder: encoder to check
-@@ -258,13 +219,27 @@ EXPORT_SYMBOL(drm_helper_crtc_in_use);
- void drm_helper_disable_unused_functions(struct drm_device *dev)
- {
- struct drm_encoder *encoder;
-+ struct drm_connector *connector;
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_crtc *crtc;
-
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ if (!connector->encoder)
-+ continue;
-+ if (connector->status == connector_status_disconnected)
-+ connector->encoder = NULL;
-+ }
-+
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder_funcs = encoder->helper_private;
-- if (!drm_helper_encoder_in_use(encoder))
-- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
-+ if (!drm_helper_encoder_in_use(encoder)) {
-+ if (encoder_funcs->disable)
-+ (*encoder_funcs->disable)(encoder);
-+ else
-+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
-+ /* disconnector encoder from any connector */
-+ encoder->crtc = NULL;
-+ }
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-@@ -312,7 +287,7 @@ static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, true);
-- DRM_DEBUG("connector %d enabled? %s\n", connector->base.id,
-+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- i++;
-@@ -342,7 +317,7 @@ static bool drm_target_preferred(struct drm_device *dev,
- continue;
- }
-
-- DRM_DEBUG("looking for preferred mode on connector %d\n",
-+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- connector->base.id);
-
- modes[i] = drm_has_preferred_mode(connector, width, height);
-@@ -351,7 +326,7 @@ static bool drm_target_preferred(struct drm_device *dev,
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
-- DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name :
-+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- i++;
- }
-@@ -409,7 +384,7 @@ static int drm_pick_crtcs(struct drm_device *dev,
- c = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
-- if ((connector->encoder->possible_crtcs & (1 << c)) == 0) {
-+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
- c++;
- continue;
- }
-@@ -452,7 +427,7 @@ static void drm_setup_crtcs(struct drm_device *dev)
- int width, height;
- int i, ret;
-
-- DRM_DEBUG("\n");
-+ DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-@@ -475,7 +450,7 @@ static void drm_setup_crtcs(struct drm_device *dev)
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
-
-- DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height);
-+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
-@@ -490,12 +465,14 @@ static void drm_setup_crtcs(struct drm_device *dev)
- }
-
- if (mode && crtc) {
-- DRM_DEBUG("desired mode %s set on crtc %d\n",
-+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, crtc->base.id);
- crtc->desired_mode = mode;
- connector->encoder->crtc = crtc;
-- } else
-+ } else {
- connector->encoder->crtc = NULL;
-+ connector->encoder = NULL;
-+ }
- i++;
- }
-
-@@ -702,18 +679,17 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode);
- int drm_crtc_helper_set_config(struct drm_mode_set *set)
- {
- struct drm_device *dev;
-- struct drm_crtc **save_crtcs, *new_crtc;
-- struct drm_encoder **save_encoders, *new_encoder;
-+ struct drm_crtc *save_crtcs, *new_crtc, *crtc;
-+ struct drm_encoder *save_encoders, *new_encoder, *encoder;
- struct drm_framebuffer *old_fb = NULL;
-- bool save_enabled;
- bool mode_changed = false; /* if true do a full mode set */
- bool fb_changed = false; /* if true and !mode_changed just do a flip */
-- struct drm_connector *connector;
-+ struct drm_connector *save_connectors, *connector;
- int count = 0, ro, fail = 0;
- struct drm_crtc_helper_funcs *crtc_funcs;
- int ret = 0;
-
-- DRM_DEBUG("\n");
-+ DRM_DEBUG_KMS("\n");
-
- if (!set)
- return -EINVAL;
-@@ -726,37 +702,60 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
-
- crtc_funcs = set->crtc->helper_private;
-
-- DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n",
-+ DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
-+ " %d (x, y) (%i, %i)\n",
- set->crtc, set->crtc->base.id, set->fb, set->connectors,
- (int)set->num_connectors, set->x, set->y);
-
- dev = set->crtc->dev;
-
-- /* save previous config */
-- save_enabled = set->crtc->enabled;
--
-- /*
-- * We do mode_config.num_connectors here since we'll look at the
-- * CRTC and encoder associated with each connector later.
-- */
-- save_crtcs = kzalloc(dev->mode_config.num_connector *
-- sizeof(struct drm_crtc *), GFP_KERNEL);
-+ /* Allocate space for the backup of all (non-pointer) crtc, encoder and
-+ * connector data. */
-+ save_crtcs = kzalloc(dev->mode_config.num_crtc *
-+ sizeof(struct drm_crtc), GFP_KERNEL);
- if (!save_crtcs)
- return -ENOMEM;
-
-- save_encoders = kzalloc(dev->mode_config.num_connector *
-- sizeof(struct drm_encoders *), GFP_KERNEL);
-+ save_encoders = kzalloc(dev->mode_config.num_encoder *
-+ sizeof(struct drm_encoder), GFP_KERNEL);
- if (!save_encoders) {
- kfree(save_crtcs);
- return -ENOMEM;
- }
-
-+ save_connectors = kzalloc(dev->mode_config.num_connector *
-+ sizeof(struct drm_connector), GFP_KERNEL);
-+ if (!save_connectors) {
-+ kfree(save_crtcs);
-+ kfree(save_encoders);
-+ return -ENOMEM;
-+ }
-+
-+ /* Copy data. Note that driver private data is not affected.
-+ * Should anything bad happen only the expected state is
-+ * restored, not the drivers personal bookkeeping.
-+ */
-+ count = 0;
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ save_crtcs[count++] = *crtc;
-+ }
-+
-+ count = 0;
-+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+ save_encoders[count++] = *encoder;
-+ }
-+
-+ count = 0;
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ save_connectors[count++] = *connector;
-+ }
-+
- /* We should be able to check here if the fb has the same properties
- * and then just flip_or_move it */
- if (set->crtc->fb != set->fb) {
- /* If we have no fb then treat it as a full mode set */
- if (set->crtc->fb == NULL) {
-- DRM_DEBUG("crtc has no fb, full mode set\n");
-+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
- mode_changed = true;
- } else if (set->fb == NULL) {
- mode_changed = true;
-@@ -772,7 +771,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- fb_changed = true;
-
- if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
-- DRM_DEBUG("modes are different, full mode set\n");
-+ DRM_DEBUG_KMS("modes are different, full mode set\n");
- drm_mode_debug_printmodeline(&set->crtc->mode);
- drm_mode_debug_printmodeline(set->mode);
- mode_changed = true;
-@@ -783,7 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_connector_helper_funcs *connector_funcs =
- connector->helper_private;
-- save_encoders[count++] = connector->encoder;
- new_encoder = connector->encoder;
- for (ro = 0; ro < set->num_connectors; ro++) {
- if (set->connectors[ro] == connector) {
-@@ -798,15 +796,20 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- }
-
- if (new_encoder != connector->encoder) {
-- DRM_DEBUG("encoder changed, full mode switch\n");
-+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
- mode_changed = true;
-+ /* If the encoder is reused for another connector, then
-+ * the appropriate crtc will be set later.
-+ */
-+ if (connector->encoder)
-+ connector->encoder->crtc = NULL;
- connector->encoder = new_encoder;
- }
- }
-
- if (fail) {
- ret = -EINVAL;
-- goto fail_no_encoder;
-+ goto fail;
- }
-
- count = 0;
-@@ -814,8 +817,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- if (!connector->encoder)
- continue;
-
-- save_crtcs[count++] = connector->encoder->crtc;
--
- if (connector->encoder->crtc == set->crtc)
- new_crtc = NULL;
- else
-@@ -830,14 +831,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- if (new_crtc &&
- !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
- ret = -EINVAL;
-- goto fail_set_mode;
-+ goto fail;
- }
- if (new_crtc != connector->encoder->crtc) {
-- DRM_DEBUG("crtc changed, full mode switch\n");
-+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
- mode_changed = true;
- connector->encoder->crtc = new_crtc;
- }
-- DRM_DEBUG("setting connector %d crtc to %p\n",
-+ DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
- connector->base.id, new_crtc);
- }
-
-@@ -850,7 +851,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- set->crtc->fb = set->fb;
- set->crtc->enabled = (set->mode != NULL);
- if (set->mode != NULL) {
-- DRM_DEBUG("attempting to set mode from userspace\n");
-+ DRM_DEBUG_KMS("attempting to set mode from"
-+ " userspace\n");
- drm_mode_debug_printmodeline(set->mode);
- if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
- set->x, set->y,
-@@ -858,7 +860,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- DRM_ERROR("failed to set mode on crtc %p\n",
- set->crtc);
- ret = -EINVAL;
-- goto fail_set_mode;
-+ goto fail;
- }
- /* TODO are these needed? */
- set->crtc->desired_x = set->x;
-@@ -873,37 +875,41 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
- ret = crtc_funcs->mode_set_base(set->crtc,
- set->x, set->y, old_fb);
- if (ret != 0)
-- goto fail_set_mode;
-+ goto fail;
- }
-
-+ kfree(save_connectors);
- kfree(save_encoders);
- kfree(save_crtcs);
- return 0;
-
--fail_set_mode:
-- set->crtc->enabled = save_enabled;
-- set->crtc->fb = old_fb;
-+fail:
-+ /* Restore all previous data. */
- count = 0;
-- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-- if (!connector->encoder)
-- continue;
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ *crtc = save_crtcs[count++];
-+ }
-
-- connector->encoder->crtc = save_crtcs[count++];
-+ count = 0;
-+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+ *encoder = save_encoders[count++];
- }
--fail_no_encoder:
-- kfree(save_crtcs);
-+
- count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-- connector->encoder = save_encoders[count++];
-+ *connector = save_connectors[count++];
- }
-+
-+ kfree(save_connectors);
- kfree(save_encoders);
-+ kfree(save_crtcs);
- return ret;
- }
- EXPORT_SYMBOL(drm_crtc_helper_set_config);
-
- bool drm_helper_plugged_event(struct drm_device *dev)
- {
-- DRM_DEBUG("\n");
-+ DRM_DEBUG_KMS("\n");
-
- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
- dev->mode_config.max_height);
-@@ -932,7 +938,6 @@ bool drm_helper_plugged_event(struct drm_device *dev)
- */
- bool drm_helper_initial_config(struct drm_device *dev)
- {
-- struct drm_connector *connector;
- int count = 0;
-
- count = drm_helper_probe_connector_modes(dev,
-@@ -940,16 +945,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
- dev->mode_config.max_height);
-
- /*
-- * None of the available connectors had any modes, so add some
-- * and try to light them up anyway
-+ * we shouldn't end up with no modes here.
- */
-- if (!count) {
-- DRM_ERROR("connectors have no modes, using standard modes\n");
-- list_for_each_entry(connector,
-- &dev->mode_config.connector_list,
-- head)
-- drm_helper_add_std_modes(dev, connector);
-- }
-+ WARN(!count, "Connected connector with 0 modes\n");
-
- drm_setup_crtcs(dev);
-
-diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
-index b39d7bf..a75ca63 100644
---- a/drivers/gpu/drm/drm_drv.c
-+++ b/drivers/gpu/drm/drm_drv.c
-@@ -63,12 +63,12 @@ static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
-- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
-diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
-index 7f2728b..90d76ba 100644
---- a/drivers/gpu/drm/drm_edid.c
-+++ b/drivers/gpu/drm/drm_edid.c
-@@ -60,6 +60,12 @@
- #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
- /* use +hsync +vsync for detailed mode */
- #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-+/* define the number of Extension EDID block */
-+#define MAX_EDID_EXT_NUM 4
-+
-+#define LEVEL_DMT 0
-+#define LEVEL_GTF 1
-+#define LEVEL_CVT 2
-
- static struct edid_quirk {
- char *vendor;
-@@ -237,28 +243,291 @@ static void edid_fixup_preferred(struct drm_connector *connector,
- preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
- }
-
-+/*
-+ * Add the Autogenerated from the DMT spec.
-+ * This table is copied from xfree86/modes/xf86EdidModes.c.
-+ * But the mode with Reduced blank feature is deleted.
-+ */
-+static struct drm_display_mode drm_dmt_modes[] = {
-+ /* 640x350@85Hz */
-+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
-+ 736, 832, 0, 350, 382, 385, 445, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 640x400@85Hz */
-+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
-+ 736, 832, 0, 400, 401, 404, 445, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 720x400@85Hz */
-+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
-+ 828, 936, 0, 400, 401, 404, 446, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 640x480@60Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
-+ 752, 800, 0, 480, 489, 492, 525, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 640x480@72Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
-+ 704, 832, 0, 480, 489, 492, 520, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 640x480@75Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
-+ 720, 840, 0, 480, 481, 484, 500, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 640x480@85Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
-+ 752, 832, 0, 480, 481, 484, 509, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 800x600@56Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
-+ 896, 1024, 0, 600, 601, 603, 625, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 800x600@60Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
-+ 968, 1056, 0, 600, 601, 605, 628, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 800x600@72Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
-+ 976, 1040, 0, 600, 637, 643, 666, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 800x600@75Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
-+ 896, 1056, 0, 600, 601, 604, 625, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 800x600@85Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
-+ 896, 1048, 0, 600, 601, 604, 631, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 848x480@60Hz */
-+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
-+ 976, 1088, 0, 480, 486, 494, 517, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1024x768@43Hz, interlace */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
-+ 1208, 1264, 0, 768, 768, 772, 817, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-+ DRM_MODE_FLAG_INTERLACE) },
-+ /* 1024x768@60Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
-+ 1184, 1344, 0, 768, 771, 777, 806, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 1024x768@70Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
-+ 1184, 1328, 0, 768, 771, 777, 806, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 1024x768@75Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
-+ 1136, 1312, 0, 768, 769, 772, 800, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1024x768@85Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
-+ 1072, 1376, 0, 768, 769, 772, 808, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1152x864@75Hz */
-+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
-+ 1344, 1600, 0, 864, 865, 868, 900, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x768@60Hz */
-+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
-+ 1472, 1664, 0, 768, 771, 778, 798, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x768@75Hz */
-+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
-+ 1488, 1696, 0, 768, 771, 778, 805, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 1280x768@85Hz */
-+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
-+ 1496, 1712, 0, 768, 771, 778, 809, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x800@60Hz */
-+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
-+ 1480, 1680, 0, 800, 803, 809, 831, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
-+ /* 1280x800@75Hz */
-+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
-+ 1488, 1696, 0, 800, 803, 809, 838, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x800@85Hz */
-+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
-+ 1496, 1712, 0, 800, 803, 809, 843, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x960@60Hz */
-+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
-+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x960@85Hz */
-+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
-+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x1024@60Hz */
-+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
-+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x1024@75Hz */
-+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
-+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1280x1024@85Hz */
-+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
-+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1360x768@60Hz */
-+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
-+ 1536, 1792, 0, 768, 771, 777, 795, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1440x1050@60Hz */
-+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
-+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1440x1050@75Hz */
-+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
-+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1440x1050@85Hz */
-+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
-+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1440x900@60Hz */
-+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
-+ 1672, 1904, 0, 900, 903, 909, 934, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1440x900@75Hz */
-+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
-+ 1688, 1936, 0, 900, 903, 909, 942, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1440x900@85Hz */
-+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
-+ 1696, 1952, 0, 900, 903, 909, 948, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1600x1200@60Hz */
-+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
-+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1600x1200@65Hz */
-+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
-+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1600x1200@70Hz */
-+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
-+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1600x1200@75Hz */
-+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
-+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1600x1200@85Hz */
-+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
-+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1680x1050@60Hz */
-+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
-+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1680x1050@75Hz */
-+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
-+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1680x1050@85Hz */
-+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
-+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1792x1344@60Hz */
-+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
-+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1729x1344@75Hz */
-+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
-+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1853x1392@60Hz */
-+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
-+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1856x1392@75Hz */
-+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
-+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1920x1200@60Hz */
-+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
-+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1920x1200@75Hz */
-+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
-+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1920x1200@85Hz */
-+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
-+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1920x1440@60Hz */
-+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
-+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 1920x1440@75Hz */
-+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
-+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 2560x1600@60Hz */
-+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
-+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 2560x1600@75HZ */
-+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
-+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+ /* 2560x1600@85HZ */
-+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
-+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
-+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-+};
-+
-+static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
-+ int hsize, int vsize, int fresh)
-+{
-+ int i, count;
-+ struct drm_display_mode *ptr, *mode;
-+
-+ count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-+ mode = NULL;
-+ for (i = 0; i < count; i++) {
-+ ptr = &drm_dmt_modes[i];
-+ if (hsize == ptr->hdisplay &&
-+ vsize == ptr->vdisplay &&
-+ fresh == drm_mode_vrefresh(ptr)) {
-+ /* get the expected default mode */
-+ mode = drm_mode_duplicate(dev, ptr);
-+ break;
-+ }
-+ }
-+ return mode;
-+}
- /**
- * drm_mode_std - convert standard mode info (width, height, refresh) into mode
- * @t: standard timing params
-+ * @timing_level: standard timing level
- *
- * Take the standard timing params (in this case width, aspect, and refresh)
-- * and convert them into a real mode using CVT.
-+ * and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
- */
- struct drm_display_mode *drm_mode_std(struct drm_device *dev,
-- struct std_timing *t)
-+ struct std_timing *t,
-+ int timing_level)
- {
- struct drm_display_mode *mode;
-- int hsize = t->hsize * 8 + 248, vsize;
-+ int hsize, vsize;
-+ int vrefresh_rate;
- unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
- >> EDID_TIMING_ASPECT_SHIFT;
--
-- mode = drm_mode_create(dev);
-- if (!mode)
-- return NULL;
--
-+ unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
-+ >> EDID_TIMING_VFREQ_SHIFT;
-+
-+ /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
-+ hsize = t->hsize * 8 + 248;
-+ /* vrefresh_rate = vfreq + 60 */
-+ vrefresh_rate = vfreq + 60;
-+ /* the vdisplay is calculated based on the aspect ratio */
- if (aspect_ratio == 0)
- vsize = (hsize * 10) / 16;
- else if (aspect_ratio == 1)
-@@ -267,9 +536,30 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- vsize = (hsize * 4) / 5;
- else
- vsize = (hsize * 9) / 16;
--
-- drm_mode_set_name(mode);
--
-+ /* HDTV hack */
-+ if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
-+ mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
-+ mode->hdisplay = 1366;
-+ mode->vsync_start = mode->vsync_start - 1;
-+ mode->vsync_end = mode->vsync_end - 1;
-+ return mode;
-+ }
-+ mode = NULL;
-+ /* check whether it can be found in default mode table */
-+ mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
-+ if (mode)
-+ return mode;
-+
-+ switch (timing_level) {
-+ case LEVEL_DMT:
-+ break;
-+ case LEVEL_GTF:
-+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
-+ break;
-+ case LEVEL_CVT:
-+ mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
-+ break;
-+ }
- return mode;
- }
-
-@@ -451,6 +741,19 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
-
- return modes;
- }
-+/**
-+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
-+ * @edid: EDID block to scan
-+ */
-+static int standard_timing_level(struct edid *edid)
-+{
-+ if (edid->revision >= 2) {
-+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
-+ return LEVEL_CVT;
-+ return LEVEL_GTF;
-+ }
-+ return LEVEL_DMT;
-+}
-
- /**
- * add_standard_modes - get std. modes from EDID and add them
-@@ -463,6 +766,9 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
- {
- struct drm_device *dev = connector->dev;
- int i, modes = 0;
-+ int timing_level;
-+
-+ timing_level = standard_timing_level(edid);
-
- for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
-@@ -472,7 +778,8 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
-- newmode = drm_mode_std(dev, &edid->standard_timings[i]);
-+ newmode = drm_mode_std(dev, &edid->standard_timings[i],
-+ timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
-@@ -496,6 +803,9 @@ static int add_detailed_info(struct drm_connector *connector,
- {
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
-+ int timing_level;
-+
-+ timing_level = standard_timing_level(edid);
-
- for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
- struct detailed_timing *timing = &edid->detailed_timings[i];
-@@ -525,7 +835,8 @@ static int add_detailed_info(struct drm_connector *connector,
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
-- newmode = drm_mode_std(dev, std);
-+ newmode = drm_mode_std(dev, std,
-+ timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
-@@ -551,6 +862,122 @@ static int add_detailed_info(struct drm_connector *connector,
-
- return modes;
- }
-+/**
-+ * add_detailed_info_eedid - get detailed mode info from the additional timing
-+ * EDID block
-+ * @connector: attached connector
-+ * @edid: EDID block to scan (used only to locate the additional timing EDID block)
-+ * @quirks: quirks to apply
-+ *
-+ * Some of the detailed timing sections may contain mode information. Grab
-+ * it and add it to the list.
-+ */
-+static int add_detailed_info_eedid(struct drm_connector *connector,
-+ struct edid *edid, u32 quirks)
-+{
-+ struct drm_device *dev = connector->dev;
-+ int i, j, modes = 0;
-+ char *edid_ext = NULL;
-+ struct detailed_timing *timing;
-+ struct detailed_non_pixel *data;
-+ struct drm_display_mode *newmode;
-+ int edid_ext_num;
-+ int start_offset, end_offset;
-+ int timing_level;
-+
-+ if (edid->version == 1 && edid->revision < 3) {
-+ /* If the EDID version is less than 1.3, there is no
-+ * extension EDID.
-+ */
-+ return 0;
-+ }
-+ if (!edid->extensions) {
-+ /* if there is no extension EDID, it is unnecessary to
-+ * parse the E-EDID to get detailed info
-+ */
-+ return 0;
-+ }
-+
-+ /* Choose the real EDID extension number */
-+ edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
-+ MAX_EDID_EXT_NUM : edid->extensions;
-+
-+ /* Find CEA extension */
-+ for (i = 0; i < edid_ext_num; i++) {
-+ edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-+ /* This block is CEA extension */
-+ if (edid_ext[0] == 0x02)
-+ break;
-+ }
-+
-+ if (i == edid_ext_num) {
-+ /* if there is no additional timing EDID block, return */
-+ return 0;
-+ }
-+
-+ /* Get the start offset of detailed timing block */
-+ start_offset = edid_ext[2];
-+ if (start_offset == 0) {
-+ /* If the start_offset is zero, it means that neither detailed
-+ * info nor data block exist. In such case it is also
-+ * unnecessary to parse the detailed timing info.
-+ */
-+ return 0;
-+ }
-+
-+ timing_level = standard_timing_level(edid);
-+ end_offset = EDID_LENGTH;
-+ end_offset -= sizeof(struct detailed_timing);
-+ for (i = start_offset; i < end_offset;
-+ i += sizeof(struct detailed_timing)) {
-+ timing = (struct detailed_timing *)(edid_ext + i);
-+ data = &timing->data.other_data;
-+ /* Detailed mode timing */
-+ if (timing->pixel_clock) {
-+ newmode = drm_mode_detailed(dev, edid, timing, quirks);
-+ if (!newmode)
-+ continue;
-+
-+ drm_mode_probed_add(connector, newmode);
-+
-+ modes++;
-+ continue;
-+ }
-+
-+ /* Other timing or info */
-+ switch (data->type) {
-+ case EDID_DETAIL_MONITOR_SERIAL:
-+ break;
-+ case EDID_DETAIL_MONITOR_STRING:
-+ break;
-+ case EDID_DETAIL_MONITOR_RANGE:
-+ /* Get monitor range data */
-+ break;
-+ case EDID_DETAIL_MONITOR_NAME:
-+ break;
-+ case EDID_DETAIL_MONITOR_CPDATA:
-+ break;
-+ case EDID_DETAIL_STD_MODES:
-+ /* Five modes per detailed section */
-+ for (j = 0; j < 5; j++) {
-+ struct std_timing *std;
-+ struct drm_display_mode *newmode;
-+
-+ std = &data->data.timings[j];
-+ newmode = drm_mode_std(dev, std, timing_level);
-+ if (newmode) {
-+ drm_mode_probed_add(connector, newmode);
-+ modes++;
-+ }
-+ }
-+ break;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ return modes;
-+}
-
- #define DDC_ADDR 0x50
- /**
-@@ -584,7 +1011,6 @@ int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
-- dev_info(&adapter->dev, "unable to read EDID block.\n");
- return -1;
- }
- EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-@@ -597,8 +1023,6 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
-
- ret = drm_do_probe_ddc_edid(adapter, buf, len);
- if (ret != 0) {
-- dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
-- drm_get_connector_name(connector));
- goto end;
- }
- if (!edid_is_valid((struct edid *)buf)) {
-@@ -610,7 +1034,6 @@ end:
- return ret;
- }
-
--#define MAX_EDID_EXT_NUM 4
- /**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
-@@ -763,6 +1186,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
- num_modes += add_detailed_info(connector, edid, quirks);
-+ num_modes += add_detailed_info_eedid(connector, edid, quirks);
-
- if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
- edid_fixup_preferred(connector, quirks);
-@@ -788,3 +1212,49 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
- return num_modes;
- }
- EXPORT_SYMBOL(drm_add_edid_modes);
-+
-+/**
-+ * drm_add_modes_noedid - add modes for the connectors without EDID
-+ * @connector: connector we're probing
-+ * @hdisplay: the horizontal display limit
-+ * @vdisplay: the vertical display limit
-+ *
-+ * Add modes from the DMT table to the connector's mode list. A mode is
-+ * added only if its hdisplay/vdisplay do not exceed the given limits.
-+ *
-+ * Return number of modes added or 0 if we couldn't find any.
-+ */
-+int drm_add_modes_noedid(struct drm_connector *connector,
-+ int hdisplay, int vdisplay)
-+{
-+ int i, count, num_modes = 0;
-+ struct drm_display_mode *mode, *ptr;
-+ struct drm_device *dev = connector->dev;
-+
-+ count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-+ if (hdisplay < 0)
-+ hdisplay = 0;
-+ if (vdisplay < 0)
-+ vdisplay = 0;
-+
-+ for (i = 0; i < count; i++) {
-+ ptr = &drm_dmt_modes[i];
-+ if (hdisplay && vdisplay) {
-+ /*
-+ * Only when two are valid, they will be used to check
-+ * whether the mode should be added to the mode list of
-+ * the connector.
-+ */
-+ if (ptr->hdisplay > hdisplay ||
-+ ptr->vdisplay > vdisplay)
-+ continue;
-+ }
-+ mode = drm_mode_duplicate(dev, ptr);
-+ if (mode) {
-+ drm_mode_probed_add(connector, mode);
-+ num_modes++;
-+ }
-+ }
-+ return num_modes;
-+}
-+EXPORT_SYMBOL(drm_add_modes_noedid);
-diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
-new file mode 100644
-index 0000000..f018469
---- /dev/null
-+++ b/drivers/gpu/drm/drm_encoder_slave.c
-@@ -0,0 +1,116 @@
-+/*
-+ * Copyright (C) 2009 Francisco Jerez.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining
-+ * a copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sublicense, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial
-+ * portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+#include "drm_encoder_slave.h"
-+
-+/**
-+ * drm_i2c_encoder_init - Initialize an I2C slave encoder
-+ * @dev: DRM device.
-+ * @encoder: Encoder to be attached to the I2C device. You aren't
-+ * required to have called drm_encoder_init() before.
-+ * @adap: I2C adapter that will be used to communicate with
-+ * the device.
-+ * @info: Information that will be used to create the I2C device.
-+ * Required fields are @addr and @type.
-+ *
-+ * Create an I2C device on the specified bus (the module containing its
-+ * driver is transparently loaded) and attach it to the specified
-+ * &drm_encoder_slave. The @slave_funcs field will be initialized with
-+ * the hooks provided by the slave driver.
-+ *
-+ * Returns 0 on success or a negative errno on failure, in particular,
-+ * -ENODEV is returned when no matching driver is found.
-+ */
-+int drm_i2c_encoder_init(struct drm_device *dev,
-+ struct drm_encoder_slave *encoder,
-+ struct i2c_adapter *adap,
-+ const struct i2c_board_info *info)
-+{
-+ char modalias[sizeof(I2C_MODULE_PREFIX)
-+ + I2C_NAME_SIZE];
-+ struct module *module = NULL;
-+ struct i2c_client *client;
-+ struct drm_i2c_encoder_driver *encoder_drv;
-+ int err = 0;
-+
-+ snprintf(modalias, sizeof(modalias),
-+ "%s%s", I2C_MODULE_PREFIX, info->type);
-+ request_module(modalias);
-+
-+ client = i2c_new_device(adap, info);
-+ if (!client) {
-+ err = -ENOMEM;
-+ goto fail;
-+ }
-+
-+ if (!client->driver) {
-+ err = -ENODEV;
-+ goto fail_unregister;
-+ }
-+
-+ module = client->driver->driver.owner;
-+ if (!try_module_get(module)) {
-+ err = -ENODEV;
-+ goto fail_unregister;
-+ }
-+
-+ encoder->bus_priv = client;
-+
-+ encoder_drv = to_drm_i2c_encoder_driver(client->driver);
-+
-+ err = encoder_drv->encoder_init(client, dev, encoder);
-+ if (err)
-+ goto fail_unregister;
-+
-+ return 0;
-+
-+fail_unregister:
-+ i2c_unregister_device(client);
-+ module_put(module);
-+fail:
-+ return err;
-+}
-+EXPORT_SYMBOL(drm_i2c_encoder_init);
-+
-+/**
-+ * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
-+ * @drm_encoder: Encoder to be unregistered.
-+ *
-+ * This should be called from the @destroy method of an I2C slave
-+ * encoder driver once I2C access is no longer needed.
-+ */
-+void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
-+{
-+ struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
-+ struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
-+ struct module *module = client->driver->driver.owner;
-+
-+ i2c_unregister_device(client);
-+ encoder->bus_priv = NULL;
-+
-+ module_put(module);
-+}
-+EXPORT_SYMBOL(drm_i2c_encoder_destroy);
-diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
-new file mode 100644
-index 0000000..8eee4a6
---- /dev/null
-+++ b/drivers/gpu/drm/drm_fb_helper.c
-@@ -0,0 +1,701 @@
-+/*
-+ * Copyright (c) 2006-2009 Red Hat Inc.
-+ * Copyright (c) 2006-2008 Intel Corporation
-+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
-+ *
-+ * DRM framebuffer helper functions
-+ *
-+ * Permission to use, copy, modify, distribute, and sell this software and its
-+ * documentation for any purpose is hereby granted without fee, provided that
-+ * the above copyright notice appear in all copies and that both that copyright
-+ * notice and this permission notice appear in supporting documentation, and
-+ * that the name of the copyright holders not be used in advertising or
-+ * publicity pertaining to distribution of the software without specific,
-+ * written prior permission. The copyright holders make no representations
-+ * about the suitability of this software for any purpose. It is provided "as
-+ * is" without express or implied warranty.
-+ *
-+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
-+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-+ * OF THIS SOFTWARE.
-+ *
-+ * Authors:
-+ * Dave Airlie <airlied@linux.ie>
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+#include <linux/sysrq.h>
-+#include <linux/fb.h>
-+#include "drmP.h"
-+#include "drm_crtc.h"
-+#include "drm_fb_helper.h"
-+#include "drm_crtc_helper.h"
-+
-+MODULE_AUTHOR("David Airlie, Jesse Barnes");
-+MODULE_DESCRIPTION("DRM KMS helper");
-+MODULE_LICENSE("GPL and additional rights");
-+
-+static LIST_HEAD(kernel_fb_helper_list);
-+
-+bool drm_fb_helper_force_kernel_mode(void)
-+{
-+ int i = 0;
-+ bool ret, error = false;
-+ struct drm_fb_helper *helper;
-+
-+ if (list_empty(&kernel_fb_helper_list))
-+ return false;
-+
-+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
-+ for (i = 0; i < helper->crtc_count; i++) {
-+ struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
-+ ret = drm_crtc_helper_set_config(mode_set);
-+ if (ret)
-+ error = true;
-+ }
-+ }
-+ return error;
-+}
-+
-+int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
-+ void *panic_str)
-+{
-+ DRM_ERROR("panic occurred, switching back to text console\n");
-+ return drm_fb_helper_force_kernel_mode();
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_panic);
-+
-+static struct notifier_block paniced = {
-+ .notifier_call = drm_fb_helper_panic,
-+};
-+
-+/**
-+ * drm_fb_helper_restore - restore the framebuffer console (kernel) config
-+ *
-+ * Restores the kernel's fbcon mode, used for lastclose & panic paths.
-+ */
-+void drm_fb_helper_restore(void)
-+{
-+ bool ret;
-+ ret = drm_fb_helper_force_kernel_mode();
-+ if (ret == true)
-+ DRM_ERROR("Failed to restore crtc configuration\n");
-+}
-+EXPORT_SYMBOL(drm_fb_helper_restore);
-+
-+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
-+{
-+ drm_fb_helper_restore();
-+}
-+static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-+
-+static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
-+{
-+ schedule_work(&drm_fb_helper_restore_work);
-+}
-+
-+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
-+ .handler = drm_fb_helper_sysrq,
-+ .help_msg = "force-fb(V)",
-+ .action_msg = "Restore framebuffer console",
-+};
-+
-+static void drm_fb_helper_on(struct fb_info *info)
-+{
-+ struct drm_fb_helper *fb_helper = info->par;
-+ struct drm_device *dev = fb_helper->dev;
-+ struct drm_crtc *crtc;
-+ struct drm_encoder *encoder;
-+ int i;
-+
-+ /*
-+ * For each CRTC in this fb, turn the crtc on, then
-+ * find all associated encoders and turn them on.
-+ */
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+
-+ for (i = 0; i < fb_helper->crtc_count; i++) {
-+ if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-+ break;
-+ }
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ /* Found a CRTC on this fb, now find encoders */
-+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+ if (encoder->crtc == crtc) {
-+ struct drm_encoder_helper_funcs *encoder_funcs;
-+
-+ encoder_funcs = encoder->helper_private;
-+ mutex_lock(&dev->mode_config.mutex);
-+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-+ mutex_unlock(&dev->mode_config.mutex);
-+ }
-+ }
-+ }
-+}
-+
-+static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
-+{
-+ struct drm_fb_helper *fb_helper = info->par;
-+ struct drm_device *dev = fb_helper->dev;
-+ struct drm_crtc *crtc;
-+ struct drm_encoder *encoder;
-+ int i;
-+
-+ /*
-+ * For each CRTC in this fb, find all associated encoders
-+ * and turn them off, then turn off the CRTC.
-+ */
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-+
-+ for (i = 0; i < fb_helper->crtc_count; i++) {
-+ if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-+ break;
-+ }
-+
-+ /* Found a CRTC on this fb, now find encoders */
-+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+ if (encoder->crtc == crtc) {
-+ struct drm_encoder_helper_funcs *encoder_funcs;
-+
-+ encoder_funcs = encoder->helper_private;
-+ mutex_lock(&dev->mode_config.mutex);
-+ encoder_funcs->dpms(encoder, dpms_mode);
-+ mutex_unlock(&dev->mode_config.mutex);
-+ }
-+ }
-+ if (dpms_mode == DRM_MODE_DPMS_OFF) {
-+ mutex_lock(&dev->mode_config.mutex);
-+ crtc_funcs->dpms(crtc, dpms_mode);
-+ mutex_unlock(&dev->mode_config.mutex);
-+ }
-+ }
-+}
-+
-+int drm_fb_helper_blank(int blank, struct fb_info *info)
-+{
-+ switch (blank) {
-+ case FB_BLANK_UNBLANK:
-+ drm_fb_helper_on(info);
-+ break;
-+ case FB_BLANK_NORMAL:
-+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
-+ break;
-+ case FB_BLANK_HSYNC_SUSPEND:
-+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
-+ break;
-+ case FB_BLANK_VSYNC_SUSPEND:
-+ drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
-+ break;
-+ case FB_BLANK_POWERDOWN:
-+ drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
-+ break;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_blank);
-+
-+static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
-+{
-+ int i;
-+
-+ for (i = 0; i < helper->crtc_count; i++)
-+ kfree(helper->crtc_info[i].mode_set.connectors);
-+ kfree(helper->crtc_info);
-+}
-+
-+int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
-+{
-+ struct drm_device *dev = helper->dev;
-+ struct drm_crtc *crtc;
-+ int ret = 0;
-+ int i;
-+
-+ helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
-+ if (!helper->crtc_info)
-+ return -ENOMEM;
-+
-+ helper->crtc_count = crtc_count;
-+
-+ for (i = 0; i < crtc_count; i++) {
-+ helper->crtc_info[i].mode_set.connectors =
-+ kcalloc(max_conn_count,
-+ sizeof(struct drm_connector *),
-+ GFP_KERNEL);
-+
-+ if (!helper->crtc_info[i].mode_set.connectors) {
-+ ret = -ENOMEM;
-+ goto out_free;
-+ }
-+ helper->crtc_info[i].mode_set.num_connectors = 0;
-+ }
-+
-+ i = 0;
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ helper->crtc_info[i].crtc_id = crtc->base.id;
-+ helper->crtc_info[i].mode_set.crtc = crtc;
-+ i++;
-+ }
-+ helper->conn_limit = max_conn_count;
-+ return 0;
-+out_free:
-+ drm_fb_helper_crtc_free(helper);
-+ return -ENOMEM;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
-+
-+int drm_fb_helper_setcolreg(unsigned regno,
-+ unsigned red,
-+ unsigned green,
-+ unsigned blue,
-+ unsigned transp,
-+ struct fb_info *info)
-+{
-+ struct drm_fb_helper *fb_helper = info->par;
-+ struct drm_device *dev = fb_helper->dev;
-+ struct drm_crtc *crtc;
-+ int i;
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ struct drm_framebuffer *fb = fb_helper->fb;
-+
-+ for (i = 0; i < fb_helper->crtc_count; i++) {
-+ if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-+ break;
-+ }
-+ if (i == fb_helper->crtc_count)
-+ continue;
-+
-+ if (regno > 255)
-+ return 1;
-+
-+ if (fb->depth == 8) {
-+ fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
-+ return 0;
-+ }
-+
-+ if (regno < 16) {
-+ switch (fb->depth) {
-+ case 15:
-+ fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
-+ ((green & 0xf800) >> 6) |
-+ ((blue & 0xf800) >> 11);
-+ break;
-+ case 16:
-+ fb->pseudo_palette[regno] = (red & 0xf800) |
-+ ((green & 0xfc00) >> 5) |
-+ ((blue & 0xf800) >> 11);
-+ break;
-+ case 24:
-+ case 32:
-+ fb->pseudo_palette[regno] =
-+ (((red >> 8) & 0xff) << info->var.red.offset) |
-+ (((green >> 8) & 0xff) << info->var.green.offset) |
-+ (((blue >> 8) & 0xff) << info->var.blue.offset);
-+ break;
-+ }
-+ }
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-+
-+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
-+ struct fb_info *info)
-+{
-+ struct drm_fb_helper *fb_helper = info->par;
-+ struct drm_framebuffer *fb = fb_helper->fb;
-+ int depth;
-+
-+ if (var->pixclock == -1 || !var->pixclock)
-+ return -EINVAL;
-+
-+ /* Need to resize the fb object !!! */
-+ if (var->xres > fb->width || var->yres > fb->height) {
-+ DRM_ERROR("Requested width/height is greater than current fb "
-+ "object %dx%d > %dx%d\n", var->xres, var->yres,
-+ fb->width, fb->height);
-+ DRM_ERROR("Need resizing code.\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (var->bits_per_pixel) {
-+ case 16:
-+ depth = (var->green.length == 6) ? 16 : 15;
-+ break;
-+ case 32:
-+ depth = (var->transp.length > 0) ? 32 : 24;
-+ break;
-+ default:
-+ depth = var->bits_per_pixel;
-+ break;
-+ }
-+
-+ switch (depth) {
-+ case 8:
-+ var->red.offset = 0;
-+ var->green.offset = 0;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 15:
-+ var->red.offset = 10;
-+ var->green.offset = 5;
-+ var->blue.offset = 0;
-+ var->red.length = 5;
-+ var->green.length = 5;
-+ var->blue.length = 5;
-+ var->transp.length = 1;
-+ var->transp.offset = 15;
-+ break;
-+ case 16:
-+ var->red.offset = 11;
-+ var->green.offset = 5;
-+ var->blue.offset = 0;
-+ var->red.length = 5;
-+ var->green.length = 6;
-+ var->blue.length = 5;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 24:
-+ var->red.offset = 16;
-+ var->green.offset = 8;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 32:
-+ var->red.offset = 16;
-+ var->green.offset = 8;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 8;
-+ var->transp.offset = 24;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_check_var);
-+
-+/* this will let fbcon do the mode init */
-+int drm_fb_helper_set_par(struct fb_info *info)
-+{
-+ struct drm_fb_helper *fb_helper = info->par;
-+ struct drm_device *dev = fb_helper->dev;
-+ struct fb_var_screeninfo *var = &info->var;
-+ struct drm_crtc *crtc;
-+ int ret;
-+ int i;
-+
-+ if (var->pixclock != -1) {
-+ DRM_ERROR("PIXEL CLCOK SET\n");
-+ return -EINVAL;
-+ }
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+
-+ for (i = 0; i < fb_helper->crtc_count; i++) {
-+ if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-+ break;
-+ }
-+ if (i == fb_helper->crtc_count)
-+ continue;
-+
-+ if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
-+ mutex_lock(&dev->mode_config.mutex);
-+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
-+ mutex_unlock(&dev->mode_config.mutex);
-+ if (ret)
-+ return ret;
-+ }
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_set_par);
-+
-+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
-+ struct fb_info *info)
-+{
-+ struct drm_fb_helper *fb_helper = info->par;
-+ struct drm_device *dev = fb_helper->dev;
-+ struct drm_mode_set *modeset;
-+ struct drm_crtc *crtc;
-+ int ret = 0;
-+ int i;
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ for (i = 0; i < fb_helper->crtc_count; i++) {
-+ if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-+ break;
-+ }
-+
-+ if (i == fb_helper->crtc_count)
-+ continue;
-+
-+ modeset = &fb_helper->crtc_info[i].mode_set;
-+
-+ modeset->x = var->xoffset;
-+ modeset->y = var->yoffset;
-+
-+ if (modeset->num_connectors) {
-+ mutex_lock(&dev->mode_config.mutex);
-+ ret = crtc->funcs->set_config(modeset);
-+ mutex_unlock(&dev->mode_config.mutex);
-+ if (!ret) {
-+ info->var.xoffset = var->xoffset;
-+ info->var.yoffset = var->yoffset;
-+ }
-+ }
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_pan_display);
-+
-+int drm_fb_helper_single_fb_probe(struct drm_device *dev,
-+ int (*fb_create)(struct drm_device *dev,
-+ uint32_t fb_width,
-+ uint32_t fb_height,
-+ uint32_t surface_width,
-+ uint32_t surface_height,
-+ struct drm_framebuffer **fb_ptr))
-+{
-+ struct drm_crtc *crtc;
-+ struct drm_connector *connector;
-+ unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
-+ unsigned int surface_width = 0, surface_height = 0;
-+ int new_fb = 0;
-+ int crtc_count = 0;
-+ int ret, i, conn_count = 0;
-+ struct fb_info *info;
-+ struct drm_framebuffer *fb;
-+ struct drm_mode_set *modeset = NULL;
-+ struct drm_fb_helper *fb_helper;
-+
-+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ if (drm_helper_crtc_in_use(crtc)) {
-+ if (crtc->desired_mode) {
-+ if (crtc->desired_mode->hdisplay < fb_width)
-+ fb_width = crtc->desired_mode->hdisplay;
-+
-+ if (crtc->desired_mode->vdisplay < fb_height)
-+ fb_height = crtc->desired_mode->vdisplay;
-+
-+ if (crtc->desired_mode->hdisplay > surface_width)
-+ surface_width = crtc->desired_mode->hdisplay;
-+
-+ if (crtc->desired_mode->vdisplay > surface_height)
-+ surface_height = crtc->desired_mode->vdisplay;
-+ }
-+ crtc_count++;
-+ }
-+ }
-+
-+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
-+ /* hmm everyone went away - assume VGA cable just fell out
-+ and will come back later. */
-+ return 0;
-+ }
-+
-+ /* do we have an fb already? */
-+ if (list_empty(&dev->mode_config.fb_kernel_list)) {
-+ ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
-+ surface_height, &fb);
-+ if (ret)
-+ return -EINVAL;
-+ new_fb = 1;
-+ } else {
-+ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
-+ struct drm_framebuffer, filp_head);
-+
-+ /* if someone hotplugs something bigger than we have already allocated, we are pwned.
-+ As really we can't resize an fbdev that is in the wild currently due to fbdev
-+ not really being designed for the lower layers moving stuff around under it.
-+ - so in the grand style of things - punt. */
-+ if ((fb->width < surface_width) ||
-+ (fb->height < surface_height)) {
-+ DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
-+ return -EINVAL;
-+ }
-+ }
-+
-+ info = fb->fbdev;
-+ fb_helper = info->par;
-+
-+ crtc_count = 0;
-+ /* okay we need to setup new connector sets in the crtcs */
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ modeset = &fb_helper->crtc_info[crtc_count].mode_set;
-+ modeset->fb = fb;
-+ conn_count = 0;
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ if (connector->encoder)
-+ if (connector->encoder->crtc == modeset->crtc) {
-+ modeset->connectors[conn_count] = connector;
-+ conn_count++;
-+ if (conn_count > fb_helper->conn_limit)
-+ BUG();
-+ }
-+ }
-+
-+ for (i = conn_count; i < fb_helper->conn_limit; i++)
-+ modeset->connectors[i] = NULL;
-+
-+ modeset->crtc = crtc;
-+ crtc_count++;
-+
-+ modeset->num_connectors = conn_count;
-+ if (modeset->crtc->desired_mode) {
-+ if (modeset->mode)
-+ drm_mode_destroy(dev, modeset->mode);
-+ modeset->mode = drm_mode_duplicate(dev,
-+ modeset->crtc->desired_mode);
-+ }
-+ }
-+ fb_helper->crtc_count = crtc_count;
-+ fb_helper->fb = fb;
-+
-+ if (new_fb) {
-+ info->var.pixclock = -1;
-+ if (register_framebuffer(info) < 0)
-+ return -EINVAL;
-+ } else {
-+ drm_fb_helper_set_par(info);
-+ }
-+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-+ info->fix.id);
-+
-+ /* Switch back to kernel console on panic */
-+ /* multi card linked list maybe */
-+ if (list_empty(&kernel_fb_helper_list)) {
-+ printk(KERN_INFO "registered panic notifier\n");
-+ atomic_notifier_chain_register(&panic_notifier_list,
-+ &paniced);
-+ register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-+ }
-+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-+
-+void drm_fb_helper_free(struct drm_fb_helper *helper)
-+{
-+ list_del(&helper->kernel_fb_list);
-+ if (list_empty(&kernel_fb_helper_list)) {
-+ printk(KERN_INFO "unregistered panic notifier\n");
-+ atomic_notifier_chain_unregister(&panic_notifier_list,
-+ &paniced);
-+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-+ }
-+ drm_fb_helper_crtc_free(helper);
-+}
-+EXPORT_SYMBOL(drm_fb_helper_free);
-+
-+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch)
-+{
-+ info->fix.type = FB_TYPE_PACKED_PIXELS;
-+ info->fix.visual = FB_VISUAL_TRUECOLOR;
-+ info->fix.type_aux = 0;
-+ info->fix.xpanstep = 1; /* doing it in hw */
-+ info->fix.ypanstep = 1; /* doing it in hw */
-+ info->fix.ywrapstep = 0;
-+ info->fix.accel = FB_ACCEL_NONE;
-+ info->fix.type_aux = 0;
-+
-+ info->fix.line_length = pitch;
-+ return;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-+
-+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
-+ uint32_t fb_width, uint32_t fb_height)
-+{
-+ info->pseudo_palette = fb->pseudo_palette;
-+ info->var.xres_virtual = fb->width;
-+ info->var.yres_virtual = fb->height;
-+ info->var.bits_per_pixel = fb->bits_per_pixel;
-+ info->var.xoffset = 0;
-+ info->var.yoffset = 0;
-+ info->var.activate = FB_ACTIVATE_NOW;
-+ info->var.height = -1;
-+ info->var.width = -1;
-+
-+ switch (fb->depth) {
-+ case 8:
-+ info->var.red.offset = 0;
-+ info->var.green.offset = 0;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 8; /* 8bit DAC */
-+ info->var.green.length = 8;
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 0;
-+ info->var.transp.length = 0;
-+ break;
-+ case 15:
-+ info->var.red.offset = 10;
-+ info->var.green.offset = 5;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 5;
-+ info->var.green.length = 5;
-+ info->var.blue.length = 5;
-+ info->var.transp.offset = 15;
-+ info->var.transp.length = 1;
-+ break;
-+ case 16:
-+ info->var.red.offset = 11;
-+ info->var.green.offset = 5;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 5;
-+ info->var.green.length = 6;
-+ info->var.blue.length = 5;
-+ info->var.transp.offset = 0;
-+ break;
-+ case 24:
-+ info->var.red.offset = 16;
-+ info->var.green.offset = 8;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 8;
-+ info->var.green.length = 8;
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 0;
-+ info->var.transp.length = 0;
-+ break;
-+ case 32:
-+ info->var.red.offset = 16;
-+ info->var.green.offset = 8;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 8;
-+ info->var.green.length = 8;
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 24;
-+ info->var.transp.length = 8;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ info->var.xres = fb_width;
-+ info->var.yres = fb_height;
-+}
-+EXPORT_SYMBOL(drm_fb_helper_fill_var);
-diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
-index ffe8f43..230c9ff 100644
---- a/drivers/gpu/drm/drm_gem.c
-+++ b/drivers/gpu/drm/drm_gem.c
-@@ -164,7 +164,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
- * Removes the mapping from handle to filp for this object.
- */
- static int
--drm_gem_handle_delete(struct drm_file *filp, int handle)
-+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
- {
- struct drm_device *dev;
- struct drm_gem_object *obj;
-@@ -207,7 +207,7 @@ drm_gem_handle_delete(struct drm_file *filp, int handle)
- int
- drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
-- int *handlep)
-+ u32 *handlep)
- {
- int ret;
-
-@@ -221,7 +221,7 @@ again:
-
- /* do the allocation under our spinlock */
- spin_lock(&file_priv->table_lock);
-- ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
-+ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
- spin_unlock(&file_priv->table_lock);
- if (ret == -EAGAIN)
- goto again;
-@@ -237,7 +237,7 @@ EXPORT_SYMBOL(drm_gem_handle_create);
- /** Returns a reference to the object named by the handle. */
- struct drm_gem_object *
- drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
-- int handle)
-+ u32 handle)
- {
- struct drm_gem_object *obj;
-
-@@ -344,7 +344,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
- struct drm_gem_open *args = data;
- struct drm_gem_object *obj;
- int ret;
-- int handle;
-+ u32 handle;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-@@ -539,7 +539,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
- vma->vm_ops = obj->dev->driver->gem_vm_ops;
- vma->vm_private_data = map->handle;
-- /* FIXME: use pgprot_writecombine when available */
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- /* Take a ref for this mapping of the object, so that the fault
-diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
-index 3e47869..c861d80 100644
---- a/drivers/gpu/drm/drm_mm.c
-+++ b/drivers/gpu/drm/drm_mm.c
-@@ -44,6 +44,7 @@
- #include "drmP.h"
- #include "drm_mm.h"
- #include <linux/slab.h>
-+#include <linux/seq_file.h>
-
- #define MM_UNUSED_TARGET 4
-
-@@ -370,3 +371,23 @@ void drm_mm_takedown(struct drm_mm * mm)
- BUG_ON(mm->num_unused != 0);
- }
- EXPORT_SYMBOL(drm_mm_takedown);
-+
-+#if defined(CONFIG_DEBUG_FS)
-+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
-+{
-+ struct drm_mm_node *entry;
-+ int total_used = 0, total_free = 0, total = 0;
-+
-+ list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
-+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
-+ total += entry->size;
-+ if (entry->free)
-+ total_free += entry->size;
-+ else
-+ total_used += entry->size;
-+ }
-+ seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_mm_dump_table);
-+#endif
-diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
-index 7914097..49404ce 100644
---- a/drivers/gpu/drm/drm_modes.c
-+++ b/drivers/gpu/drm/drm_modes.c
-@@ -8,6 +8,8 @@
- * Copyright © 2007 Dave Airlie
- * Copyright © 2007-2008 Intel Corporation
- * Jesse Barnes <jesse.barnes@intel.com>
-+ * Copyright 2005-2006 Luc Verhaegen
-+ * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
-@@ -38,7 +40,6 @@
- #include "drm.h"
- #include "drm_crtc.h"
-
--#define DRM_MODESET_DEBUG "drm_mode"
- /**
- * drm_mode_debug_printmodeline - debug print a mode
- * @dev: DRM device
-@@ -51,8 +52,8 @@
- */
- void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
- {
-- DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
-- "Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
-+ DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
-+ "0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
-@@ -62,6 +63,420 @@ void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
- EXPORT_SYMBOL(drm_mode_debug_printmodeline);
-
- /**
-+ * drm_cvt_mode - create a modeline based on the CVT algorithm
-+ * @dev: DRM device
-+ * @hdisplay: hdisplay size
-+ * @vdisplay: vdisplay size
-+ * @vrefresh: vrefresh rate
-+ * @reduced: whether reduced blanking (CVT-RB) should be used
-+ * @interlaced: whether interlace is supported
-+ *
-+ * LOCKING:
-+ * none.
-+ *
-+ * return the modeline based on CVT algorithm
-+ *
-+ * This function is called to generate the modeline based on CVT algorithm
-+ * according to the hdisplay, vdisplay, vrefresh.
-+ * It is based from the VESA(TM) Coordinated Video Timing Generator by
-+ * Graham Loveridge April 9, 2003 available at
-+ * http://www.vesa.org/public/CVT/CVTd6r1.xls
-+ *
-+ * It is adapted from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
-+ * translated to integer arithmetic.
-+ */
-+#define HV_FACTOR 1000
-+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
-+ int vdisplay, int vrefresh,
-+ bool reduced, bool interlaced)
-+{
-+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
-+#define CVT_MARGIN_PERCENTAGE 18
-+ /* 2) character cell horizontal granularity (pixels) - default 8 */
-+#define CVT_H_GRANULARITY 8
-+ /* 3) Minimum vertical porch (lines) - default 3 */
-+#define CVT_MIN_V_PORCH 3
-+ /* 4) Minimum number of vertical back porch lines - default 6 */
-+#define CVT_MIN_V_BPORCH 6
-+ /* Pixel Clock step (kHz) */
-+#define CVT_CLOCK_STEP 250
-+ struct drm_display_mode *drm_mode;
-+ bool margins = false;
-+ unsigned int vfieldrate, hperiod;
-+ int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
-+ int interlace;
-+
-+ /* allocate the drm_display_mode structure. If failure, we will
-+ * return directly
-+ */
-+ drm_mode = drm_mode_create(dev);
-+ if (!drm_mode)
-+ return NULL;
-+
-+ /* the CVT default refresh rate is 60Hz */
-+ if (!vrefresh)
-+ vrefresh = 60;
-+
-+ /* the required field fresh rate */
-+ if (interlaced)
-+ vfieldrate = vrefresh * 2;
-+ else
-+ vfieldrate = vrefresh;
-+
-+ /* horizontal pixels */
-+ hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
-+
-+ /* determine the left&right borders */
-+ hmargin = 0;
-+ if (margins) {
-+ hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
-+ hmargin -= hmargin % CVT_H_GRANULARITY;
-+ }
-+ /* find the total active pixels */
-+ drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
-+
-+ /* find the number of lines per field */
-+ if (interlaced)
-+ vdisplay_rnd = vdisplay / 2;
-+ else
-+ vdisplay_rnd = vdisplay;
-+
-+ /* find the top & bottom borders */
-+ vmargin = 0;
-+ if (margins)
-+ vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
-+
-+ drm_mode->vdisplay = vdisplay + 2 * vmargin;
-+
-+ /* Interlaced */
-+ if (interlaced)
-+ interlace = 1;
-+ else
-+ interlace = 0;
-+
-+ /* Determine VSync Width from aspect ratio */
-+ if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
-+ vsync = 4;
-+ else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
-+ vsync = 5;
-+ else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
-+ vsync = 6;
-+ else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
-+ vsync = 7;
-+ else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
-+ vsync = 7;
-+ else /* custom */
-+ vsync = 10;
-+
-+ if (!reduced) {
-+ /* simplify the GTF calculation */
-+ /* 4) Minimum time of vertical sync + back porch interval (µs)
-+ * default 550.0
-+ */
-+ int tmp1, tmp2;
-+#define CVT_MIN_VSYNC_BP 550
-+ /* 3) Nominal HSync width (% of line period) - default 8 */
-+#define CVT_HSYNC_PERCENTAGE 8
-+ unsigned int hblank_percentage;
-+ int vsyncandback_porch, vback_porch, hblank;
-+
-+ /* estimated the horizontal period */
-+ tmp1 = HV_FACTOR * 1000000 -
-+ CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
-+ tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
-+ interlace;
-+ hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
-+
-+ tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
-+ /* 9. Find number of lines in sync + backporch */
-+ if (tmp1 < (vsync + CVT_MIN_V_PORCH))
-+ vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
-+ else
-+ vsyncandback_porch = tmp1;
-+ /* 10. Find number of lines in back porch */
-+ vback_porch = vsyncandback_porch - vsync;
-+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
-+ vsyncandback_porch + CVT_MIN_V_PORCH;
-+ /* 5) Definition of Horizontal blanking time limitation */
-+ /* Gradient (%/kHz) - default 600 */
-+#define CVT_M_FACTOR 600
-+ /* Offset (%) - default 40 */
-+#define CVT_C_FACTOR 40
-+ /* Blanking time scaling factor - default 128 */
-+#define CVT_K_FACTOR 128
-+ /* Scaling factor weighting - default 20 */
-+#define CVT_J_FACTOR 20
-+#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
-+#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
-+ CVT_J_FACTOR)
-+ /* 12. Find ideal blanking duty cycle from formula */
-+ hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
-+ hperiod / 1000;
-+ /* 13. Blanking time */
-+ if (hblank_percentage < 20 * HV_FACTOR)
-+ hblank_percentage = 20 * HV_FACTOR;
-+ hblank = drm_mode->hdisplay * hblank_percentage /
-+ (100 * HV_FACTOR - hblank_percentage);
-+ hblank -= hblank % (2 * CVT_H_GRANULARITY);
-+ /* 14. Find the total pixels per line */
-+ drm_mode->htotal = drm_mode->hdisplay + hblank;
-+ drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
-+ drm_mode->hsync_start = drm_mode->hsync_end -
-+ (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
-+ drm_mode->hsync_start += CVT_H_GRANULARITY -
-+ drm_mode->hsync_start % CVT_H_GRANULARITY;
-+ /* fill the Vsync values */
-+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
-+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
-+ } else {
-+ /* Reduced blanking */
-+ /* Minimum vertical blanking interval time (µs)- default 460 */
-+#define CVT_RB_MIN_VBLANK 460
-+ /* Fixed number of clocks for horizontal sync */
-+#define CVT_RB_H_SYNC 32
-+ /* Fixed number of clocks for horizontal blanking */
-+#define CVT_RB_H_BLANK 160
-+ /* Fixed number of lines for vertical front porch - default 3*/
-+#define CVT_RB_VFPORCH 3
-+ int vbilines;
-+ int tmp1, tmp2;
-+ /* 8. Estimate Horizontal period. */
-+ tmp1 = HV_FACTOR * 1000000 -
-+ CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
-+ tmp2 = vdisplay_rnd + 2 * vmargin;
-+ hperiod = tmp1 / (tmp2 * vfieldrate);
-+ /* 9. Find number of lines in vertical blanking */
-+ vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
-+ /* 10. Check if vertical blanking is sufficient */
-+ if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
-+ vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
-+ /* 11. Find total number of lines in vertical field */
-+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
-+ /* 12. Find total number of pixels in a line */
-+ drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
-+ /* Fill in HSync values */
-+ drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
-+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
-+ }
-+ /* 15/13. Find pixel clock frequency (kHz for xf86) */
-+ drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
-+ drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
-+ /* 18/16. Find actual vertical frame frequency */
-+ /* ignore - just set the mode flag for interlaced */
-+ if (interlaced)
-+ drm_mode->vtotal *= 2;
-+ /* Fill the mode line name */
-+ drm_mode_set_name(drm_mode);
-+ if (reduced)
-+ drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
-+ DRM_MODE_FLAG_NVSYNC);
-+ else
-+ drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
-+ DRM_MODE_FLAG_NHSYNC);
-+ if (interlaced)
-+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
-+
-+ return drm_mode;
-+}
-+EXPORT_SYMBOL(drm_cvt_mode);
-+
-+/**
-+ * drm_gtf_mode - create the modeline based on GTF algorithm
-+ *
-+ * @dev: drm device
-+ * @hdisplay: hdisplay size
-+ * @vdisplay: vdisplay size
-+ * @vrefresh: vrefresh rate
-+ * @interlaced: whether interlace is supported
-+ * @margins: whether margins are supported
-+ *
-+ * LOCKING:
-+ * none.
-+ *
-+ * return the modeline based on GTF algorithm
-+ *
-+ * This function is to create the modeline based on the GTF algorithm.
-+ * Generalized Timing Formula is derived from:
-+ * GTF Spreadsheet by Andy Morrish (1/5/97)
-+ * available at http://www.vesa.org
-+ *
-+ * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated to
-+ * integer arithmetic. The fb_get_mode function in drivers/video/fbmon.c
-+ * was also used as a reference.
-+ */
-+struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
-+ int vdisplay, int vrefresh,
-+ bool interlaced, int margins)
-+{
-+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
-+#define GTF_MARGIN_PERCENTAGE 18
-+ /* 2) character cell horizontal granularity (pixels) - default 8 */
-+#define GTF_CELL_GRAN 8
-+ /* 3) Minimum vertical porch (lines) - default 3 */
-+#define GTF_MIN_V_PORCH 1
-+ /* width of vsync in lines */
-+#define V_SYNC_RQD 3
-+ /* width of hsync as % of total line */
-+#define H_SYNC_PERCENT 8
-+ /* min time of vsync + back porch (microsec) */
-+#define MIN_VSYNC_PLUS_BP 550
-+ /* blanking formula gradient */
-+#define GTF_M 600
-+ /* blanking formula offset */
-+#define GTF_C 40
-+ /* blanking formula scaling factor */
-+#define GTF_K 128
-+ /* blanking formula scaling factor */
-+#define GTF_J 20
-+ /* C' and M' are part of the Blanking Duty Cycle computation */
-+#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
-+ struct drm_display_mode *drm_mode;
-+ unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
-+ int top_margin, bottom_margin;
-+ int interlace;
-+ unsigned int hfreq_est;
-+ int vsync_plus_bp, vback_porch;
-+ unsigned int vtotal_lines, vfieldrate_est, hperiod;
-+ unsigned int vfield_rate, vframe_rate;
-+ int left_margin, right_margin;
-+ unsigned int total_active_pixels, ideal_duty_cycle;
-+ unsigned int hblank, total_pixels, pixel_freq;
-+ int hsync, hfront_porch, vodd_front_porch_lines;
-+ unsigned int tmp1, tmp2;
-+
-+ drm_mode = drm_mode_create(dev);
-+ if (!drm_mode)
-+ return NULL;
-+
-+ /* 1. In order to give correct results, the number of horizontal
-+ * pixels requested is first processed to ensure that it is divisible
-+ * by the character size, by rounding it to the nearest character
-+ * cell boundary:
-+ */
-+ hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
-+ hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
-+
-+ /* 2. If interlace is requested, the number of vertical lines assumed
-+ * by the calculation must be halved, as the computation calculates
-+ * the number of vertical lines per field.
-+ */
-+ if (interlaced)
-+ vdisplay_rnd = vdisplay / 2;
-+ else
-+ vdisplay_rnd = vdisplay;
-+
-+ /* 3. Find the frame rate required: */
-+ if (interlaced)
-+ vfieldrate_rqd = vrefresh * 2;
-+ else
-+ vfieldrate_rqd = vrefresh;
-+
-+ /* 4. Find number of lines in Top margin: */
-+ top_margin = 0;
-+ if (margins)
-+ top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
-+ 1000;
-+ /* 5. Find number of lines in bottom margin: */
-+ bottom_margin = top_margin;
-+
-+ /* 6. If interlace is required, then set variable interlace: */
-+ if (interlaced)
-+ interlace = 1;
-+ else
-+ interlace = 0;
-+
-+ /* 7. Estimate the Horizontal frequency */
-+ {
-+ tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
-+ tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
-+ 2 + interlace;
-+ hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
-+ }
-+
-+ /* 8. Find the number of lines in V sync + back porch */
-+ /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
-+ vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
-+ vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
-+ /* 9. Find the number of lines in V back porch alone: */
-+ vback_porch = vsync_plus_bp - V_SYNC_RQD;
-+ /* 10. Find the total number of lines in Vertical field period: */
-+ vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
-+ vsync_plus_bp + GTF_MIN_V_PORCH;
-+ /* 11. Estimate the Vertical field frequency: */
-+ vfieldrate_est = hfreq_est / vtotal_lines;
-+ /* 12. Find the actual horizontal period: */
-+ hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
-+
-+ /* 13. Find the actual Vertical field frequency: */
-+ vfield_rate = hfreq_est / vtotal_lines;
-+ /* 14. Find the Vertical frame frequency: */
-+ if (interlaced)
-+ vframe_rate = vfield_rate / 2;
-+ else
-+ vframe_rate = vfield_rate;
-+ /* 15. Find number of pixels in left margin: */
-+ if (margins)
-+ left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
-+ 1000;
-+ else
-+ left_margin = 0;
-+
-+ /* 16.Find number of pixels in right margin: */
-+ right_margin = left_margin;
-+ /* 17.Find total number of active pixels in image and left and right */
-+ total_active_pixels = hdisplay_rnd + left_margin + right_margin;
-+ /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
-+ ideal_duty_cycle = GTF_C_PRIME * 1000 -
-+ (GTF_M_PRIME * 1000000 / hfreq_est);
-+ /* 19.Find the number of pixels in the blanking time to the nearest
-+ * double character cell: */
-+ hblank = total_active_pixels * ideal_duty_cycle /
-+ (100000 - ideal_duty_cycle);
-+ hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
-+ hblank = hblank * 2 * GTF_CELL_GRAN;
-+ /* 20.Find total number of pixels: */
-+ total_pixels = total_active_pixels + hblank;
-+ /* 21.Find pixel clock frequency: */
-+ pixel_freq = total_pixels * hfreq_est / 1000;
-+ /* Stage 1 computations are now complete; I should really pass
-+ * the results to another function and do the Stage 2 computations,
-+ * but I only need a few more values so I'll just append the
-+ * computations here for now */
-+ /* 17. Find the number of pixels in the horizontal sync period: */
-+ hsync = H_SYNC_PERCENT * total_pixels / 100;
-+ hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
-+ hsync = hsync * GTF_CELL_GRAN;
-+ /* 18. Find the number of pixels in horizontal front porch period */
-+ hfront_porch = hblank / 2 - hsync;
-+ /* 36. Find the number of lines in the odd front porch period: */
-+ vodd_front_porch_lines = GTF_MIN_V_PORCH;
-+
-+ /* finally, pack the results in the mode struct */
-+ drm_mode->hdisplay = hdisplay_rnd;
-+ drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
-+ drm_mode->hsync_end = drm_mode->hsync_start + hsync;
-+ drm_mode->htotal = total_pixels;
-+ drm_mode->vdisplay = vdisplay_rnd;
-+ drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
-+ drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
-+ drm_mode->vtotal = vtotal_lines;
-+
-+ drm_mode->clock = pixel_freq;
-+
-+ drm_mode_set_name(drm_mode);
-+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-+
-+ if (interlaced) {
-+ drm_mode->vtotal *= 2;
-+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
-+ }
-+
-+ return drm_mode;
-+}
-+EXPORT_SYMBOL(drm_gtf_mode);
-+/**
- * drm_mode_set_name - set the name on a mode
- * @mode: name will be set in this mode
- *
-@@ -151,7 +566,9 @@ EXPORT_SYMBOL(drm_mode_height);
- * FIXME: why is this needed? shouldn't vrefresh be set already?
- *
- * RETURNS:
-- * Vertical refresh rate of @mode x 1000. For precision reasons.
-+ * Vertical refresh rate, rounded to the nearest integer. For example,
-+ * 70.288 becomes 70 Hz and 59.6 becomes 60 Hz.
- */
- int drm_mode_vrefresh(struct drm_display_mode *mode)
- {
-@@ -161,14 +578,13 @@ int drm_mode_vrefresh(struct drm_display_mode *mode)
- if (mode->vrefresh > 0)
- refresh = mode->vrefresh;
- else if (mode->htotal > 0 && mode->vtotal > 0) {
-+ int vtotal;
-+ vtotal = mode->vtotal;
- /* work out vrefresh the value will be x1000 */
- calc_val = (mode->clock * 1000);
--
- calc_val /= mode->htotal;
-- calc_val *= 1000;
-- calc_val /= mode->vtotal;
-+ refresh = (calc_val + vtotal / 2) / vtotal;
-
-- refresh = calc_val;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- refresh *= 2;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-@@ -403,8 +819,7 @@ void drm_mode_prune_invalid(struct drm_device *dev,
- list_del(&mode->head);
- if (verbose) {
- drm_mode_debug_printmodeline(mode);
-- DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
-- "Not using %s mode %d\n",
-+ DRM_DEBUG_KMS("Not using %s mode %d\n",
- mode->name, mode->status);
- }
- drm_mode_destroy(dev, mode);
-diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
-index bbd4b3d..d379c4f 100644
---- a/drivers/gpu/drm/drm_proc.c
-+++ b/drivers/gpu/drm/drm_proc.c
-@@ -106,20 +106,25 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
-- ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
-+ if (tmp == NULL) {
-+ ret = -1;
-+ goto fail;
-+ }
-+ tmp->minor = minor;
-+ tmp->info_ent = &files[i];
-+ list_add(&tmp->list, &minor->proc_nodes.list);
-+
-+ ent = proc_create_data(files[i].name, S_IRUGO, root,
-+ &drm_proc_fops, tmp);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, files[i].name);
-+ list_del(&tmp->list);
- kfree(tmp);
- ret = -1;
- goto fail;
- }
-
-- ent->proc_fops = &drm_proc_fops;
-- ent->data = tmp;
-- tmp->minor = minor;
-- tmp->info_ent = &files[i];
-- list_add(&(tmp->list), &(minor->proc_nodes.list));
- }
- return 0;
-
-diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
-index f7a615b..5161172 100644
---- a/drivers/gpu/drm/drm_sysfs.c
-+++ b/drivers/gpu/drm/drm_sysfs.c
-@@ -16,6 +16,7 @@
- #include <linux/kdev_t.h>
- #include <linux/err.h>
-
-+#include "drm_sysfs.h"
- #include "drm_core.h"
- #include "drmP.h"
-
-@@ -253,6 +254,7 @@ static ssize_t subconnector_show(struct device *device,
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
-+ case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_subconnector_property;
- is_tv = 1;
- break;
-@@ -293,6 +295,7 @@ static ssize_t select_subconnector_show(struct device *device,
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
-+ case DRM_MODE_CONNECTOR_TV:
- prop = dev->mode_config.tv_select_subconnector_property;
- is_tv = 1;
- break;
-@@ -391,6 +394,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
-+ case DRM_MODE_CONNECTOR_TV:
- for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
- if (ret)
-@@ -519,3 +523,27 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
- {
- device_unregister(&minor->kdev);
- }
-+
-+
-+/**
-+ * drm_class_device_register - Register a struct device in the drm class.
-+ *
-+ * @dev: pointer to struct device to register.
-+ *
-+ * @dev should have all relevant members pre-filled with the exception
-+ * of the class member. In particular, the device_type member must
-+ * be set.
-+ */
-+
-+int drm_class_device_register(struct device *dev)
-+{
-+ dev->class = drm_class;
-+ return device_register(dev);
-+}
-+EXPORT_SYMBOL_GPL(drm_class_device_register);
-+
-+void drm_class_device_unregister(struct device *dev)
-+{
-+ return device_unregister(dev);
-+}
-+EXPORT_SYMBOL_GPL(drm_class_device_unregister);
-diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
-index 30d6b99..5269dfa 100644
---- a/drivers/gpu/drm/i915/Makefile
-+++ b/drivers/gpu/drm/i915/Makefile
-@@ -4,10 +4,10 @@
-
- ccflags-y := -Iinclude/drm
- i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
-+ i915_debugfs.o \
- i915_suspend.o \
- i915_gem.o \
- i915_gem_debug.o \
-- i915_gem_debugfs.o \
- i915_gem_tiling.o \
- intel_display.o \
- intel_crt.o \
-diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
-new file mode 100644
-index 0000000..1e3bdce
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_debugfs.c
-@@ -0,0 +1,445 @@
-+/*
-+ * Copyright © 2008 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ * Keith Packard <keithp@keithp.com>
-+ *
-+ */
-+
-+#include <linux/seq_file.h>
-+#include "drmP.h"
-+#include "drm.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+
-+#define DRM_I915_RING_DEBUG 1
-+
-+
-+#if defined(CONFIG_DEBUG_FS)
-+
-+#define ACTIVE_LIST 1
-+#define FLUSHING_LIST 2
-+#define INACTIVE_LIST 3
-+
-+static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
-+{
-+ if (obj_priv->user_pin_count > 0)
-+ return "P";
-+ else if (obj_priv->pin_count > 0)
-+ return "p";
-+ else
-+ return " ";
-+}
-+
-+static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
-+{
-+ switch (obj_priv->tiling_mode) {
-+ default:
-+ case I915_TILING_NONE: return " ";
-+ case I915_TILING_X: return "X";
-+ case I915_TILING_Y: return "Y";
-+ }
-+}
-+
-+static int i915_gem_object_list_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ uintptr_t list = (uintptr_t) node->info_ent->data;
-+ struct list_head *head;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv;
-+ spinlock_t *lock = NULL;
-+
-+ switch (list) {
-+ case ACTIVE_LIST:
-+ seq_printf(m, "Active:\n");
-+ lock = &dev_priv->mm.active_list_lock;
-+ head = &dev_priv->mm.active_list;
-+ break;
-+ case INACTIVE_LIST:
-+ seq_printf(m, "Inactive:\n");
-+ head = &dev_priv->mm.inactive_list;
-+ break;
-+ case FLUSHING_LIST:
-+ seq_printf(m, "Flushing:\n");
-+ head = &dev_priv->mm.flushing_list;
-+ break;
-+ default:
-+ DRM_INFO("Ooops, unexpected list\n");
-+ return 0;
-+ }
-+
-+ if (lock)
-+ spin_lock(lock);
-+ list_for_each_entry(obj_priv, head, list)
-+ {
-+ struct drm_gem_object *obj = obj_priv->obj;
-+
-+ seq_printf(m, " %p: %s %08x %08x %d",
-+ obj,
-+ get_pin_flag(obj_priv),
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+
-+ if (obj->name)
-+ seq_printf(m, " (name: %d)", obj->name);
-+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-+ seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-+ if (obj_priv->gtt_space != NULL)
-+ seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-+
-+ seq_printf(m, "\n");
-+ }
-+
-+ if (lock)
-+ spin_unlock(lock);
-+ return 0;
-+}
-+
-+static int i915_gem_request_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_request *gem_request;
-+
-+ seq_printf(m, "Request:\n");
-+ list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
-+ seq_printf(m, " %d @ %d\n",
-+ gem_request->seqno,
-+ (int) (jiffies - gem_request->emitted_jiffies));
-+ }
-+ return 0;
-+}
-+
-+static int i915_gem_seqno_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ if (dev_priv->hw_status_page != NULL) {
-+ seq_printf(m, "Current sequence: %d\n",
-+ i915_get_gem_seqno(dev));
-+ } else {
-+ seq_printf(m, "Current sequence: hws uninitialized\n");
-+ }
-+ seq_printf(m, "Waiter sequence: %d\n",
-+ dev_priv->mm.waiting_gem_seqno);
-+ seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
-+ return 0;
-+}
-+
-+
-+static int i915_interrupt_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ if (!IS_IGDNG(dev)) {
-+ seq_printf(m, "Interrupt enable: %08x\n",
-+ I915_READ(IER));
-+ seq_printf(m, "Interrupt identity: %08x\n",
-+ I915_READ(IIR));
-+ seq_printf(m, "Interrupt mask: %08x\n",
-+ I915_READ(IMR));
-+ seq_printf(m, "Pipe A stat: %08x\n",
-+ I915_READ(PIPEASTAT));
-+ seq_printf(m, "Pipe B stat: %08x\n",
-+ I915_READ(PIPEBSTAT));
-+ } else {
-+ seq_printf(m, "North Display Interrupt enable: %08x\n",
-+ I915_READ(DEIER));
-+ seq_printf(m, "North Display Interrupt identity: %08x\n",
-+ I915_READ(DEIIR));
-+ seq_printf(m, "North Display Interrupt mask: %08x\n",
-+ I915_READ(DEIMR));
-+ seq_printf(m, "South Display Interrupt enable: %08x\n",
-+ I915_READ(SDEIER));
-+ seq_printf(m, "South Display Interrupt identity: %08x\n",
-+ I915_READ(SDEIIR));
-+ seq_printf(m, "South Display Interrupt mask: %08x\n",
-+ I915_READ(SDEIMR));
-+ seq_printf(m, "Graphics Interrupt enable: %08x\n",
-+ I915_READ(GTIER));
-+ seq_printf(m, "Graphics Interrupt identity: %08x\n",
-+ I915_READ(GTIIR));
-+ seq_printf(m, "Graphics Interrupt mask: %08x\n",
-+ I915_READ(GTIMR));
-+ }
-+ seq_printf(m, "Interrupts received: %d\n",
-+ atomic_read(&dev_priv->irq_received));
-+ if (dev_priv->hw_status_page != NULL) {
-+ seq_printf(m, "Current sequence: %d\n",
-+ i915_get_gem_seqno(dev));
-+ } else {
-+ seq_printf(m, "Current sequence: hws uninitialized\n");
-+ }
-+ seq_printf(m, "Waiter sequence: %d\n",
-+ dev_priv->mm.waiting_gem_seqno);
-+ seq_printf(m, "IRQ sequence: %d\n",
-+ dev_priv->mm.irq_gem_seqno);
-+ return 0;
-+}
-+
-+static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ int i;
-+
-+ seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
-+ seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
-+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
-+ struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
-+
-+ if (obj == NULL) {
-+ seq_printf(m, "Fenced object[%2d] = unused\n", i);
-+ } else {
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ obj_priv = obj->driver_private;
-+ seq_printf(m, "Fenced object[%2d] = %p: %s "
-+ "%08x %08zx %08x %s %08x %08x %d",
-+ i, obj, get_pin_flag(obj_priv),
-+ obj_priv->gtt_offset,
-+ obj->size, obj_priv->stride,
-+ get_tiling_flag(obj_priv),
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+ if (obj->name)
-+ seq_printf(m, " (name: %d)", obj->name);
-+ seq_printf(m, "\n");
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static int i915_hws_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ int i;
-+ volatile u32 *hws;
-+
-+ hws = (volatile u32 *)dev_priv->hw_status_page;
-+ if (hws == NULL)
-+ return 0;
-+
-+ for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
-+ seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-+ i * 4,
-+ hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
-+ }
-+ return 0;
-+}
-+
-+static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
-+{
-+ int page, i;
-+ uint32_t *mem;
-+
-+ for (page = 0; page < page_count; page++) {
-+ mem = kmap(pages[page]);
-+ for (i = 0; i < PAGE_SIZE; i += 4)
-+ seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-+ kunmap(pages[page]);
-+ }
-+}
-+
-+static int i915_batchbuffer_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret;
-+
-+ spin_lock(&dev_priv->mm.active_list_lock);
-+
-+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-+ obj = obj_priv->obj;
-+ if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-+ ret = i915_gem_object_get_pages(obj);
-+ if (ret) {
-+ DRM_ERROR("Failed to get pages: %d\n", ret);
-+ spin_unlock(&dev_priv->mm.active_list_lock);
-+ return ret;
-+ }
-+
-+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-+ i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-+
-+ i915_gem_object_put_pages(obj);
-+ }
-+ }
-+
-+ spin_unlock(&dev_priv->mm.active_list_lock);
-+
-+ return 0;
-+}
-+
-+static int i915_ringbuffer_data(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ u8 *virt;
-+ uint32_t *ptr, off;
-+
-+ if (!dev_priv->ring.ring_obj) {
-+ seq_printf(m, "No ringbuffer setup\n");
-+ return 0;
-+ }
-+
-+ virt = dev_priv->ring.virtual_start;
-+
-+ for (off = 0; off < dev_priv->ring.Size; off += 4) {
-+ ptr = (uint32_t *)(virt + off);
-+ seq_printf(m, "%08x : %08x\n", off, *ptr);
-+ }
-+
-+ return 0;
-+}
-+
-+static int i915_ringbuffer_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ unsigned int head, tail;
-+
-+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-+ tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-+
-+ seq_printf(m, "RingHead : %08x\n", head);
-+ seq_printf(m, "RingTail : %08x\n", tail);
-+ seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
-+ seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
-+
-+ return 0;
-+}
-+
-+static int i915_error_state(struct seq_file *m, void *unused)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_error_state *error;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dev_priv->error_lock, flags);
-+ if (!dev_priv->first_error) {
-+ seq_printf(m, "no error state collected\n");
-+ goto out;
-+ }
-+
-+ error = dev_priv->first_error;
-+
-+ seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
-+ error->time.tv_usec);
-+ seq_printf(m, "EIR: 0x%08x\n", error->eir);
-+ seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
-+ seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
-+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
-+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
-+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
-+ if (IS_I965G(dev)) {
-+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
-+ seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
-+ }
-+
-+out:
-+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
-+
-+ return 0;
-+}
-+
-+static int i915_registers_info(struct seq_file *m, void *data) {
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ uint32_t reg;
-+
-+#define DUMP_RANGE(start, end) \
-+ for (reg=start; reg < end; reg += 4) \
-+ seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-+
-+ DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
-+ DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
-+ DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
-+ DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
-+ DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
-+ DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
-+ DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
-+ DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
-+ DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
-+ DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
-+ DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
-+ DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
-+ DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
-+ DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
-+
-+ return 0;
-+}
-+
-+
-+static struct drm_info_list i915_debugfs_list[] = {
-+ {"i915_regs", i915_registers_info, 0},
-+ {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-+ {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
-+ {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-+ {"i915_gem_request", i915_gem_request_info, 0},
-+ {"i915_gem_seqno", i915_gem_seqno_info, 0},
-+ {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
-+ {"i915_gem_interrupt", i915_interrupt_info, 0},
-+ {"i915_gem_hws", i915_hws_info, 0},
-+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
-+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
-+ {"i915_batchbuffers", i915_batchbuffer_info, 0},
-+ {"i915_error_state", i915_error_state, 0},
-+};
-+#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
-+
-+int i915_debugfs_init(struct drm_minor *minor)
-+{
-+ return drm_debugfs_create_files(i915_debugfs_list,
-+ I915_DEBUGFS_ENTRIES,
-+ minor->debugfs_root, minor);
-+}
-+
-+void i915_debugfs_cleanup(struct drm_minor *minor)
-+{
-+ drm_debugfs_remove_files(i915_debugfs_list,
-+ I915_DEBUGFS_ENTRIES, minor);
-+}
-+
-+#endif /* CONFIG_DEBUG_FS */
-+
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 50d1f78..9909505 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -29,12 +29,11 @@
- #include "drmP.h"
- #include "drm.h"
- #include "drm_crtc_helper.h"
-+#include "drm_fb_helper.h"
- #include "intel_drv.h"
- #include "i915_drm.h"
- #include "i915_drv.h"
-
--#define I915_DRV "i915_drv"
--
- /* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
-@@ -80,6 +79,34 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
- return -EBUSY;
- }
-
-+/* As a ringbuffer is only allowed to wrap between instructions, fill
-+ * the tail with NOOPs.
-+ */
-+int i915_wrap_ring(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ volatile unsigned int *virt;
-+ int rem;
-+
-+ rem = dev_priv->ring.Size - dev_priv->ring.tail;
-+ if (dev_priv->ring.space < rem) {
-+ int ret = i915_wait_ring(dev, rem, __func__);
-+ if (ret)
-+ return ret;
-+ }
-+ dev_priv->ring.space -= rem;
-+
-+ virt = (unsigned int *)
-+ (dev_priv->ring.virtual_start + dev_priv->ring.tail);
-+ rem /= 4;
-+ while (rem--)
-+ *virt++ = MI_NOOP;
-+
-+ dev_priv->ring.tail = 0;
-+
-+ return 0;
-+}
-+
- /**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
-@@ -101,7 +128,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-- DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
-+ DRM_DEBUG_DRIVER("Enabled hardware status page\n");
- return 0;
- }
-
-@@ -187,8 +214,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
- master_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
- } else {
-- DRM_DEBUG_DRIVER(I915_DRV,
-- "sarea not found assuming DRI2 userspace\n");
-+ DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
- }
-
- if (init->ring_size != 0) {
-@@ -200,7 +226,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
- }
-
- dev_priv->ring.Size = init->ring_size;
-- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
-@@ -238,7 +263,7 @@ static int i915_dma_resume(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-- DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
-+ DRM_DEBUG_DRIVER("%s\n", __func__);
-
- if (dev_priv->ring.map.handle == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
-@@ -251,14 +276,14 @@ static int i915_dma_resume(struct drm_device * dev)
- DRM_ERROR("Can not find hardware status page\n");
- return -EINVAL;
- }
-- DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
-+ DRM_DEBUG_DRIVER("hw status page @ %p\n",
- dev_priv->hw_status_page);
-
- if (dev_priv->status_gfx_addr != 0)
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- else
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-- DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
-+ DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
- return 0;
- }
-@@ -552,7 +577,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
- if (!master_priv->sarea_priv)
- return -EINVAL;
-
-- DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
-+ DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->current_page,
- master_priv->sarea_priv->pf_current_page);
-@@ -633,8 +658,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
- return -EINVAL;
- }
-
-- DRM_DEBUG_DRIVER(I915_DRV,
-- "i915 batchbuffer, start %x used %d cliprects %d\n",
-+ DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
- batch->start, batch->used, batch->num_cliprects);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-@@ -681,8 +705,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
- void *batch_data;
- int ret;
-
-- DRM_DEBUG_DRIVER(I915_DRV,
-- "i915 cmdbuffer, buf %p sz %d cliprects %d\n",
-+ DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-@@ -735,7 +758,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
- {
- int ret;
-
-- DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
-+ DRM_DEBUG_DRIVER("%s\n", __func__);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-@@ -778,7 +801,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
- value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
- break;
- default:
-- DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
-+ DRM_DEBUG_DRIVER("Unknown parameter %d\n",
- param->param);
- return -EINVAL;
- }
-@@ -819,7 +842,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
- dev_priv->fence_reg_start = param->value;
- break;
- default:
-- DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
-+ DRM_DEBUG_DRIVER("unknown parameter %d\n",
- param->param);
- return -EINVAL;
- }
-@@ -846,7 +869,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
- return 0;
- }
-
-- DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
-+ DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
-
- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
-
-@@ -868,13 +891,25 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-- DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
-+ DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
-- DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
-+ DRM_DEBUG_DRIVER("load hws at %p\n",
- dev_priv->hw_status_page);
- return 0;
- }
-
-+static int i915_get_bridge_dev(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+ dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-+ if (!dev_priv->bridge_dev) {
-+ DRM_ERROR("bridge device not found\n");
-+ return -1;
-+ }
-+ return 0;
-+}
-+
- /**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
-@@ -888,20 +923,13 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
- static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size)
- {
-- struct pci_dev *bridge_dev;
-+ struct drm_i915_private *dev_priv = dev->dev_private;
- u16 tmp = 0;
- unsigned long overhead;
- unsigned long stolen;
-
-- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-- if (!bridge_dev) {
-- DRM_ERROR("bridge device not found\n");
-- return -1;
-- }
--
- /* Get the fb aperture size and "stolen" memory amount. */
-- pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
-- pci_dev_put(bridge_dev);
-+ pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
- *aperture_size = 1024 * 1024;
- *preallocated_size = 1024 * 1024;
-@@ -1153,11 +1181,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
-
-+ if (i915_get_bridge_dev(dev)) {
-+ ret = -EIO;
-+ goto free_priv;
-+ }
-+
- dev_priv->regs = ioremap(base, size);
- if (!dev_priv->regs) {
- DRM_ERROR("failed to map registers\n");
- ret = -EIO;
-- goto free_priv;
-+ goto put_bridge;
- }
-
- dev_priv->mm.gtt_mapping =
-@@ -1269,6 +1302,8 @@ out_iomapfree:
- io_mapping_free(dev_priv->mm.gtt_mapping);
- out_rmmap:
- iounmap(dev_priv->regs);
-+put_bridge:
-+ pci_dev_put(dev_priv->bridge_dev);
- free_priv:
- kfree(dev_priv);
- return ret;
-@@ -1312,6 +1347,7 @@ int i915_driver_unload(struct drm_device *dev)
- i915_gem_lastclose(dev);
- }
-
-+ pci_dev_put(dev_priv->bridge_dev);
- kfree(dev->dev_private);
-
- return 0;
-@@ -1321,7 +1357,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
- {
- struct drm_i915_file_private *i915_file_priv;
-
-- DRM_DEBUG_DRIVER(I915_DRV, "\n");
-+ DRM_DEBUG_DRIVER("\n");
- i915_file_priv = (struct drm_i915_file_private *)
- kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
-@@ -1352,7 +1388,7 @@ void i915_driver_lastclose(struct drm_device * dev)
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
-- intelfb_restore();
-+ drm_fb_helper_restore();
- return;
- }
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index fc4b68a..dbe568c 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -37,12 +37,15 @@
- #include <linux/console.h>
- #include "drm_crtc_helper.h"
-
--static unsigned int i915_modeset = -1;
-+static int i915_modeset = -1;
- module_param_named(modeset, i915_modeset, int, 0400);
-
- unsigned int i915_fbpercrtc = 0;
- module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
-+unsigned int i915_powersave = 1;
-+module_param_named(powersave, i915_powersave, int, 0400);
-+
- static struct drm_driver driver;
-
- static struct pci_device_id pciidlist[] = {
-@@ -188,8 +191,8 @@ static struct drm_driver driver = {
- .master_create = i915_master_create,
- .master_destroy = i915_master_destroy,
- #if defined(CONFIG_DEBUG_FS)
-- .debugfs_init = i915_gem_debugfs_init,
-- .debugfs_cleanup = i915_gem_debugfs_cleanup,
-+ .debugfs_init = i915_debugfs_init,
-+ .debugfs_cleanup = i915_debugfs_cleanup,
- #endif
- .gem_init_object = i915_gem_init_object,
- .gem_free_object = i915_gem_free_object,
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 5b4f87e..77ed060 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -85,7 +85,6 @@ struct drm_i915_gem_phys_object {
- };
-
- typedef struct _drm_i915_ring_buffer {
-- int tail_mask;
- unsigned long Size;
- u8 *virtual_start;
- int head;
-@@ -156,6 +155,7 @@ typedef struct drm_i915_private {
-
- void __iomem *regs;
-
-+ struct pci_dev *bridge_dev;
- drm_i915_ring_buffer_t ring;
-
- drm_dma_handle_t *status_page_dmah;
-@@ -311,7 +311,7 @@ typedef struct drm_i915_private {
- u32 saveIMR;
- u32 saveCACHE_MODE_0;
- u32 saveD_STATE;
-- u32 saveCG_2D_DIS;
-+ u32 saveDSPCLK_GATE_D;
- u32 saveMI_ARB_STATE;
- u32 saveSWF0[16];
- u32 saveSWF1[16];
-@@ -443,6 +443,14 @@ typedef struct drm_i915_private {
- struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
- } mm;
- struct sdvo_device_mapping sdvo_mappings[2];
-+
-+ /* Reclocking support */
-+ bool render_reclock_avail;
-+ bool lvds_downclock_avail;
-+ struct work_struct idle_work;
-+ struct timer_list idle_timer;
-+ bool busy;
-+ u16 orig_clock;
- } drm_i915_private_t;
-
- /** driver private structure attached to each drm_gem_object */
-@@ -575,6 +583,7 @@ enum intel_chip_family {
- extern struct drm_ioctl_desc i915_ioctls[];
- extern int i915_max_ioctl;
- extern unsigned int i915_fbpercrtc;
-+extern unsigned int i915_powersave;
-
- extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
- extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
-@@ -730,8 +739,8 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
- void i915_dump_lru(struct drm_device *dev, const char *where);
-
- /* i915_debugfs.c */
--int i915_gem_debugfs_init(struct drm_minor *minor);
--void i915_gem_debugfs_cleanup(struct drm_minor *minor);
-+int i915_debugfs_init(struct drm_minor *minor);
-+void i915_debugfs_cleanup(struct drm_minor *minor);
-
- /* i915_suspend.c */
- extern int i915_save_state(struct drm_device *dev);
-@@ -781,33 +790,32 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
-
- #define I915_VERBOSE 0
-
--#define RING_LOCALS unsigned int outring, ringmask, outcount; \
-- volatile char *virt;
--
--#define BEGIN_LP_RING(n) do { \
-- if (I915_VERBOSE) \
-- DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
-- if (dev_priv->ring.space < (n)*4) \
-- i915_wait_ring(dev, (n)*4, __func__); \
-- outcount = 0; \
-- outring = dev_priv->ring.tail; \
-- ringmask = dev_priv->ring.tail_mask; \
-- virt = dev_priv->ring.virtual_start; \
-+#define RING_LOCALS volatile unsigned int *ring_virt__;
-+
-+#define BEGIN_LP_RING(n) do { \
-+ int bytes__ = 4*(n); \
-+ if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
-+ /* a wrap must occur between instructions so pad beforehand */ \
-+ if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
-+ i915_wrap_ring(dev); \
-+ if (unlikely (dev_priv->ring.space < bytes__)) \
-+ i915_wait_ring(dev, bytes__, __func__); \
-+ ring_virt__ = (unsigned int *) \
-+ (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
-+ dev_priv->ring.tail += bytes__; \
-+ dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
-+ dev_priv->ring.space -= bytes__; \
- } while (0)
-
--#define OUT_RING(n) do { \
-+#define OUT_RING(n) do { \
- if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
-- *(volatile unsigned int *)(virt + outring) = (n); \
-- outcount++; \
-- outring += 4; \
-- outring &= ringmask; \
-+ *ring_virt__++ = (n); \
- } while (0)
-
- #define ADVANCE_LP_RING() do { \
-- if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
-- dev_priv->ring.tail = outring; \
-- dev_priv->ring.space -= outcount * 4; \
-- I915_WRITE(PRB0_TAIL, outring); \
-+ if (I915_VERBOSE) \
-+ DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
-+ I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \
- } while(0)
-
- /**
-@@ -830,6 +838,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
- #define I915_GEM_HWS_INDEX 0x20
- #define I915_BREADCRUMB_INDEX 0x21
-
-+extern int i915_wrap_ring(struct drm_device * dev);
- extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
- #define IS_I830(dev) ((dev)->pci_device == 0x3577)
-@@ -903,6 +912,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
- /* dsparb controlled by hw only */
- #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-
-+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-+
- #define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
- #endif
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 7edb5b9..954fb69 100644
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -29,6 +29,7 @@
- #include "drm.h"
- #include "i915_drm.h"
- #include "i915_drv.h"
-+#include "intel_drv.h"
- #include <linux/swap.h>
- #include <linux/pci.h>
-
-@@ -111,7 +112,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
- {
- struct drm_i915_gem_create *args = data;
- struct drm_gem_object *obj;
-- int handle, ret;
-+ int ret;
-+ u32 handle;
-
- args->size = roundup(args->size, PAGE_SIZE);
-
-@@ -981,6 +983,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_set_domain *args = data;
- struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
- uint32_t read_domains = args->read_domains;
- uint32_t write_domain = args->write_domain;
- int ret;
-@@ -1004,15 +1007,17 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
-+ obj_priv = obj->driver_private;
-
- mutex_lock(&dev->struct_mutex);
-+
-+ intel_mark_busy(dev, obj);
-+
- #if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
- obj, obj->size, read_domains, write_domain);
- #endif
- if (read_domains & I915_GEM_DOMAIN_GTT) {
-- struct drm_i915_gem_object *obj_priv = obj->driver_private;
--
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
-
- /* Update the LRU on the fence for the CPU access that's
-@@ -2776,6 +2781,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
- BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
- BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
-
-+ intel_mark_busy(dev, obj);
-+
- #if WATCH_BUF
- DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
-@@ -4093,7 +4100,6 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
-
- /* Set up the kernel mapping for the ring. */
- ring->Size = obj->size;
-- ring->tail_mask = obj->size - 1;
-
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
- ring->map.size = obj->size;
-diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
-deleted file mode 100644
-index cb3b974..0000000
---- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
-+++ /dev/null
-@@ -1,396 +0,0 @@
--/*
-- * Copyright © 2008 Intel Corporation
-- *
-- * Permission is hereby granted, free of charge, to any person obtaining a
-- * copy of this software and associated documentation files (the "Software"),
-- * to deal in the Software without restriction, including without limitation
-- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- * and/or sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following conditions:
-- *
-- * The above copyright notice and this permission notice (including the next
-- * paragraph) shall be included in all copies or substantial portions of the
-- * Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-- * IN THE SOFTWARE.
-- *
-- * Authors:
-- * Eric Anholt <eric@anholt.net>
-- * Keith Packard <keithp@keithp.com>
-- *
-- */
--
--#include <linux/seq_file.h>
--#include "drmP.h"
--#include "drm.h"
--#include "i915_drm.h"
--#include "i915_drv.h"
--
--#define DRM_I915_RING_DEBUG 1
--
--
--#if defined(CONFIG_DEBUG_FS)
--
--#define ACTIVE_LIST 1
--#define FLUSHING_LIST 2
--#define INACTIVE_LIST 3
--
--static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
--{
-- if (obj_priv->user_pin_count > 0)
-- return "P";
-- else if (obj_priv->pin_count > 0)
-- return "p";
-- else
-- return " ";
--}
--
--static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
--{
-- switch (obj_priv->tiling_mode) {
-- default:
-- case I915_TILING_NONE: return " ";
-- case I915_TILING_X: return "X";
-- case I915_TILING_Y: return "Y";
-- }
--}
--
--static int i915_gem_object_list_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- uintptr_t list = (uintptr_t) node->info_ent->data;
-- struct list_head *head;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- struct drm_i915_gem_object *obj_priv;
-- spinlock_t *lock = NULL;
--
-- switch (list) {
-- case ACTIVE_LIST:
-- seq_printf(m, "Active:\n");
-- lock = &dev_priv->mm.active_list_lock;
-- head = &dev_priv->mm.active_list;
-- break;
-- case INACTIVE_LIST:
-- seq_printf(m, "Inactive:\n");
-- head = &dev_priv->mm.inactive_list;
-- break;
-- case FLUSHING_LIST:
-- seq_printf(m, "Flushing:\n");
-- head = &dev_priv->mm.flushing_list;
-- break;
-- default:
-- DRM_INFO("Ooops, unexpected list\n");
-- return 0;
-- }
--
-- if (lock)
-- spin_lock(lock);
-- list_for_each_entry(obj_priv, head, list)
-- {
-- struct drm_gem_object *obj = obj_priv->obj;
--
-- seq_printf(m, " %p: %s %08x %08x %d",
-- obj,
-- get_pin_flag(obj_priv),
-- obj->read_domains, obj->write_domain,
-- obj_priv->last_rendering_seqno);
--
-- if (obj->name)
-- seq_printf(m, " (name: %d)", obj->name);
-- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-- seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-- if (obj_priv->gtt_space != NULL)
-- seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
--
-- seq_printf(m, "\n");
-- }
--
-- if (lock)
-- spin_unlock(lock);
-- return 0;
--}
--
--static int i915_gem_request_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- struct drm_i915_gem_request *gem_request;
--
-- seq_printf(m, "Request:\n");
-- list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
-- seq_printf(m, " %d @ %d\n",
-- gem_request->seqno,
-- (int) (jiffies - gem_request->emitted_jiffies));
-- }
-- return 0;
--}
--
--static int i915_gem_seqno_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
--
-- if (dev_priv->hw_status_page != NULL) {
-- seq_printf(m, "Current sequence: %d\n",
-- i915_get_gem_seqno(dev));
-- } else {
-- seq_printf(m, "Current sequence: hws uninitialized\n");
-- }
-- seq_printf(m, "Waiter sequence: %d\n",
-- dev_priv->mm.waiting_gem_seqno);
-- seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
-- return 0;
--}
--
--
--static int i915_interrupt_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
--
-- seq_printf(m, "Interrupt enable: %08x\n",
-- I915_READ(IER));
-- seq_printf(m, "Interrupt identity: %08x\n",
-- I915_READ(IIR));
-- seq_printf(m, "Interrupt mask: %08x\n",
-- I915_READ(IMR));
-- seq_printf(m, "Pipe A stat: %08x\n",
-- I915_READ(PIPEASTAT));
-- seq_printf(m, "Pipe B stat: %08x\n",
-- I915_READ(PIPEBSTAT));
-- seq_printf(m, "Interrupts received: %d\n",
-- atomic_read(&dev_priv->irq_received));
-- if (dev_priv->hw_status_page != NULL) {
-- seq_printf(m, "Current sequence: %d\n",
-- i915_get_gem_seqno(dev));
-- } else {
-- seq_printf(m, "Current sequence: hws uninitialized\n");
-- }
-- seq_printf(m, "Waiter sequence: %d\n",
-- dev_priv->mm.waiting_gem_seqno);
-- seq_printf(m, "IRQ sequence: %d\n",
-- dev_priv->mm.irq_gem_seqno);
-- return 0;
--}
--
--static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- int i;
--
-- seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
-- seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
-- for (i = 0; i < dev_priv->num_fence_regs; i++) {
-- struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
--
-- if (obj == NULL) {
-- seq_printf(m, "Fenced object[%2d] = unused\n", i);
-- } else {
-- struct drm_i915_gem_object *obj_priv;
--
-- obj_priv = obj->driver_private;
-- seq_printf(m, "Fenced object[%2d] = %p: %s "
-- "%08x %08zx %08x %s %08x %08x %d",
-- i, obj, get_pin_flag(obj_priv),
-- obj_priv->gtt_offset,
-- obj->size, obj_priv->stride,
-- get_tiling_flag(obj_priv),
-- obj->read_domains, obj->write_domain,
-- obj_priv->last_rendering_seqno);
-- if (obj->name)
-- seq_printf(m, " (name: %d)", obj->name);
-- seq_printf(m, "\n");
-- }
-- }
--
-- return 0;
--}
--
--static int i915_hws_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- int i;
-- volatile u32 *hws;
--
-- hws = (volatile u32 *)dev_priv->hw_status_page;
-- if (hws == NULL)
-- return 0;
--
-- for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
-- seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-- i * 4,
-- hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
-- }
-- return 0;
--}
--
--static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
--{
-- int page, i;
-- uint32_t *mem;
--
-- for (page = 0; page < page_count; page++) {
-- mem = kmap(pages[page]);
-- for (i = 0; i < PAGE_SIZE; i += 4)
-- seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-- kunmap(pages[page]);
-- }
--}
--
--static int i915_batchbuffer_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- struct drm_gem_object *obj;
-- struct drm_i915_gem_object *obj_priv;
-- int ret;
--
-- spin_lock(&dev_priv->mm.active_list_lock);
--
-- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-- obj = obj_priv->obj;
-- if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-- ret = i915_gem_object_get_pages(obj);
-- if (ret) {
-- DRM_ERROR("Failed to get pages: %d\n", ret);
-- spin_unlock(&dev_priv->mm.active_list_lock);
-- return ret;
-- }
--
-- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-- i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
--
-- i915_gem_object_put_pages(obj);
-- }
-- }
--
-- spin_unlock(&dev_priv->mm.active_list_lock);
--
-- return 0;
--}
--
--static int i915_ringbuffer_data(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- u8 *virt;
-- uint32_t *ptr, off;
--
-- if (!dev_priv->ring.ring_obj) {
-- seq_printf(m, "No ringbuffer setup\n");
-- return 0;
-- }
--
-- virt = dev_priv->ring.virtual_start;
--
-- for (off = 0; off < dev_priv->ring.Size; off += 4) {
-- ptr = (uint32_t *)(virt + off);
-- seq_printf(m, "%08x : %08x\n", off, *ptr);
-- }
--
-- return 0;
--}
--
--static int i915_ringbuffer_info(struct seq_file *m, void *data)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- unsigned int head, tail, mask;
--
-- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-- tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-- mask = dev_priv->ring.tail_mask;
--
-- seq_printf(m, "RingHead : %08x\n", head);
-- seq_printf(m, "RingTail : %08x\n", tail);
-- seq_printf(m, "RingMask : %08x\n", mask);
-- seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
-- seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
--
-- return 0;
--}
--
--static int i915_error_state(struct seq_file *m, void *unused)
--{
-- struct drm_info_node *node = (struct drm_info_node *) m->private;
-- struct drm_device *dev = node->minor->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- struct drm_i915_error_state *error;
-- unsigned long flags;
--
-- spin_lock_irqsave(&dev_priv->error_lock, flags);
-- if (!dev_priv->first_error) {
-- seq_printf(m, "no error state collected\n");
-- goto out;
-- }
--
-- error = dev_priv->first_error;
--
-- seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
-- error->time.tv_usec);
-- seq_printf(m, "EIR: 0x%08x\n", error->eir);
-- seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
-- seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
-- seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
-- seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
-- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
-- if (IS_I965G(dev)) {
-- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
-- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
-- }
--
--out:
-- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
--
-- return 0;
--}
--
--static struct drm_info_list i915_gem_debugfs_list[] = {
-- {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-- {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
-- {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-- {"i915_gem_request", i915_gem_request_info, 0},
-- {"i915_gem_seqno", i915_gem_seqno_info, 0},
-- {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
-- {"i915_gem_interrupt", i915_interrupt_info, 0},
-- {"i915_gem_hws", i915_hws_info, 0},
-- {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
-- {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
-- {"i915_batchbuffers", i915_batchbuffer_info, 0},
-- {"i915_error_state", i915_error_state, 0},
--};
--#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
--
--int i915_gem_debugfs_init(struct drm_minor *minor)
--{
-- return drm_debugfs_create_files(i915_gem_debugfs_list,
-- I915_GEM_DEBUGFS_ENTRIES,
-- minor->debugfs_root, minor);
--}
--
--void i915_gem_debugfs_cleanup(struct drm_minor *minor)
--{
-- drm_debugfs_remove_files(i915_gem_debugfs_list,
-- I915_GEM_DEBUGFS_ENTRIES, minor);
--}
--
--#endif /* CONFIG_DEBUG_FS */
--
-diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
-index a2d527b..200e398 100644
---- a/drivers/gpu/drm/i915/i915_gem_tiling.c
-+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
-@@ -94,23 +94,15 @@
- static int
- intel_alloc_mchbar_resource(struct drm_device *dev)
- {
-- struct pci_dev *bridge_dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret = 0;
-
-- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-- if (!bridge_dev) {
-- DRM_DEBUG("no bridge dev?!\n");
-- ret = -ENODEV;
-- goto out;
-- }
--
- if (IS_I965G(dev))
-- pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
-- pci_read_config_dword(bridge_dev, reg, &temp_lo);
-+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
-+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-@@ -118,30 +110,28 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
-- goto out_put;
-+ goto out;
- }
- #endif
-
- /* Get some space for it */
-- ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
-+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
-- bridge_dev);
-+ dev_priv->bridge_dev);
- if (ret) {
- DRM_DEBUG("failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
-- goto out_put;
-+ goto out;
- }
-
- if (IS_I965G(dev))
-- pci_write_config_dword(bridge_dev, reg + 4,
-+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
-- pci_write_config_dword(bridge_dev, reg,
-+ pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
--out_put:
-- pci_dev_put(bridge_dev);
- out:
- return ret;
- }
-@@ -150,44 +140,36 @@ out:
- static bool
- intel_setup_mchbar(struct drm_device *dev)
- {
-- struct pci_dev *bridge_dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool need_disable = false, enabled;
-
-- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-- if (!bridge_dev) {
-- DRM_DEBUG("no bridge dev?!\n");
-- goto out;
-- }
--
- if (IS_I915G(dev) || IS_I915GM(dev)) {
-- pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
-+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
-- pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
-+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
-- goto out_put;
-+ goto out;
-
- if (intel_alloc_mchbar_resource(dev))
-- goto out_put;
-+ goto out;
-
- need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
-- pci_write_config_dword(bridge_dev, DEVEN_REG,
-+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
- temp | DEVEN_MCHBAR_EN);
- } else {
-- pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
-- pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
-+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
--out_put:
-- pci_dev_put(bridge_dev);
- out:
- return need_disable;
- }
-@@ -196,25 +178,18 @@ static void
- intel_teardown_mchbar(struct drm_device *dev, bool disable)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-- struct pci_dev *bridge_dev;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
-
-- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-- if (!bridge_dev) {
-- DRM_DEBUG("no bridge dev?!\n");
-- return;
-- }
--
- if (disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
-- pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
-+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- temp &= ~DEVEN_MCHBAR_EN;
-- pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
-+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
- } else {
-- pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
-+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- temp &= ~1;
-- pci_write_config_dword(bridge_dev, mchbar_reg, temp);
-+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
- }
- }
-
-@@ -234,7 +209,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- bool need_disable;
-
-- if (!IS_I9XX(dev)) {
-+ if (IS_IGDNG(dev)) {
-+		/* On IGDNG, whatever the DRAM config, the GPU always uses
-+		 * the same swizzling setup.
-+ */
-+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-+ swizzle_y = I915_BIT_6_SWIZZLE_9;
-+ } else if (!IS_I9XX(dev)) {
- /* As far as we know, the 865 doesn't have these bit 6
- * swizzling issues.
- */
-@@ -317,13 +298,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
- }
- }
-
-- /* FIXME: check with memory config on IGDNG */
-- if (IS_IGDNG(dev)) {
-- DRM_ERROR("disable tiling on IGDNG...\n");
-- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-- }
--
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
- }
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index 7ebc84c..6c89f2f 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -565,6 +565,27 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
-+
-+ /* EOS interrupts occurs */
-+ if (IS_IGD(dev) &&
-+ (hotplug_status & CRT_EOS_INT_STATUS)) {
-+ u32 temp;
-+
-+ DRM_DEBUG("EOS interrupt occurs\n");
-+ /* status is already cleared */
-+ temp = I915_READ(ADPA);
-+ temp &= ~ADPA_DAC_ENABLE;
-+ I915_WRITE(ADPA, temp);
-+
-+ temp = I915_READ(PORT_HOTPLUG_EN);
-+ temp &= ~CRT_EOS_INT_EN;
-+ I915_WRITE(PORT_HOTPLUG_EN, temp);
-+
-+ temp = I915_READ(PORT_HOTPLUG_STAT);
-+ if (temp & CRT_EOS_INT_STATUS)
-+ I915_WRITE(PORT_HOTPLUG_STAT,
-+ CRT_EOS_INT_STATUS);
-+ }
- }
-
- I915_WRITE(IIR, iir);
-diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
-index 2955083..e38cd21 100644
---- a/drivers/gpu/drm/i915/i915_reg.h
-+++ b/drivers/gpu/drm/i915/i915_reg.h
-@@ -55,7 +55,7 @@
- /* PCI config space */
-
- #define HPLLCC 0xc0 /* 855 only */
--#define GC_CLOCK_CONTROL_MASK (3 << 0)
-+#define GC_CLOCK_CONTROL_MASK (0xf << 0)
- #define GC_CLOCK_133_200 (0 << 0)
- #define GC_CLOCK_100_200 (1 << 0)
- #define GC_CLOCK_100_133 (2 << 0)
-@@ -65,6 +65,25 @@
- #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
- #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
- #define GC_DISPLAY_CLOCK_MASK (7 << 4)
-+#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
-+#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
-+#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0)
-+#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0)
-+#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0)
-+#define I965_GC_RENDER_CLOCK_MASK (0xf << 0)
-+#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0)
-+#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0)
-+#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0)
-+#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0)
-+#define I945_GC_RENDER_CLOCK_MASK (7 << 0)
-+#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0)
-+#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0)
-+#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0)
-+#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0)
-+#define I915_GC_RENDER_CLOCK_MASK (7 << 0)
-+#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
-+#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
-+#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
- #define LBB 0xf4
-
- /* VGA stuff */
-@@ -553,9 +572,118 @@
- #define DPLLA_TEST_M_BYPASS (1 << 2)
- #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
- #define D_STATE 0x6104
--#define CG_2D_DIS 0x6200
--#define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
--#define CG_3D_DIS 0x6204
-+#define DSTATE_PLL_D3_OFF (1<<3)
-+#define DSTATE_GFX_CLOCK_GATING (1<<1)
-+#define DSTATE_DOT_CLOCK_GATING (1<<0)
-+#define DSPCLK_GATE_D 0x6200
-+# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
-+# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
-+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
-+# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
-+# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
-+# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
-+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
-+# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
-+# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
-+# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
-+# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
-+# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
-+# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
-+# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
-+# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
-+# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
-+# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
-+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
-+# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
-+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
-+# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
-+# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-+# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
-+# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
-+# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
-+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
-+# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
-+# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
-+/**
-+ * This bit must be set on the 830 to prevent hangs when turning off the
-+ * overlay scaler.
-+ */
-+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
-+# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
-+# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
-+# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
-+# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
-+
-+#define RENCLK_GATE_D1 0x6204
-+# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
-+# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
-+# define PC_FE_CLOCK_GATE_DISABLE (1 << 11)
-+# define PC_BE_CLOCK_GATE_DISABLE (1 << 10)
-+# define WINDOWER_CLOCK_GATE_DISABLE (1 << 9)
-+# define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8)
-+# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
-+# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
-+# define MAG_CLOCK_GATE_DISABLE (1 << 5)
-+/** This bit must be unset on 855,865 */
-+# define MECI_CLOCK_GATE_DISABLE (1 << 4)
-+# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
-+# define MEC_CLOCK_GATE_DISABLE (1 << 2)
-+# define MECO_CLOCK_GATE_DISABLE (1 << 1)
-+/** This bit must be set on 855,865. */
-+# define SV_CLOCK_GATE_DISABLE (1 << 0)
-+# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
-+# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
-+# define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14)
-+# define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13)
-+# define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12)
-+# define I915_WM_CLOCK_GATE_DISABLE (1 << 11)
-+# define I915_IZ_CLOCK_GATE_DISABLE (1 << 10)
-+# define I915_PI_CLOCK_GATE_DISABLE (1 << 9)
-+# define I915_DI_CLOCK_GATE_DISABLE (1 << 8)
-+# define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7)
-+# define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6)
-+# define I915_SC_CLOCK_GATE_DISABLE (1 << 5)
-+# define I915_FL_CLOCK_GATE_DISABLE (1 << 4)
-+# define I915_DM_CLOCK_GATE_DISABLE (1 << 3)
-+# define I915_PS_CLOCK_GATE_DISABLE (1 << 2)
-+# define I915_CC_CLOCK_GATE_DISABLE (1 << 1)
-+# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
-+
-+# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
-+/** This bit must always be set on 965G/965GM */
-+# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
-+# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
-+# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
-+# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
-+# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
-+# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
-+/** This bit must always be set on 965G */
-+# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
-+# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
-+# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
-+# define I965_IF_CLOCK_GATE_DISABLE (1 << 20)
-+# define I965_TC_CLOCK_GATE_DISABLE (1 << 19)
-+# define I965_SO_CLOCK_GATE_DISABLE (1 << 17)
-+# define I965_FBC_CLOCK_GATE_DISABLE (1 << 16)
-+# define I965_MARI_CLOCK_GATE_DISABLE (1 << 15)
-+# define I965_MASF_CLOCK_GATE_DISABLE (1 << 14)
-+# define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13)
-+# define I965_EM_CLOCK_GATE_DISABLE (1 << 12)
-+# define I965_UC_CLOCK_GATE_DISABLE (1 << 11)
-+# define I965_SI_CLOCK_GATE_DISABLE (1 << 6)
-+# define I965_MT_CLOCK_GATE_DISABLE (1 << 5)
-+# define I965_PL_CLOCK_GATE_DISABLE (1 << 4)
-+# define I965_DG_CLOCK_GATE_DISABLE (1 << 3)
-+# define I965_QC_CLOCK_GATE_DISABLE (1 << 2)
-+# define I965_FT_CLOCK_GATE_DISABLE (1 << 1)
-+# define I965_DM_CLOCK_GATE_DISABLE (1 << 0)
-+
-+#define RENCLK_GATE_D2 0x6208
-+#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
-+#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
-+#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
-+#define RAMCLK_GATE_D 0x6210 /* CRL only */
-+#define DEUC 0x6214 /* CRL only */
-
- /*
- * Palette regs
-@@ -683,6 +811,7 @@
- #define SDVOB_HOTPLUG_INT_EN (1 << 26)
- #define SDVOC_HOTPLUG_INT_EN (1 << 25)
- #define TV_HOTPLUG_INT_EN (1 << 18)
-+#define CRT_EOS_INT_EN (1 << 10)
- #define CRT_HOTPLUG_INT_EN (1 << 9)
- #define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
- #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
-@@ -717,6 +846,7 @@
- #define DPC_HOTPLUG_INT_STATUS (1 << 28)
- #define HDMID_HOTPLUG_INT_STATUS (1 << 27)
- #define DPD_HOTPLUG_INT_STATUS (1 << 27)
-+#define CRT_EOS_INT_STATUS (1 << 12)
- #define CRT_HOTPLUG_INT_STATUS (1 << 11)
- #define TV_HOTPLUG_INT_STATUS (1 << 10)
- #define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
-@@ -1586,6 +1716,7 @@
- #define PIPECONF_PROGRESSIVE (0 << 21)
- #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
- #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
-+#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
- #define PIPEASTAT 0x70024
- #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
- #define PIPE_CRC_ERROR_ENABLE (1UL<<29)
-@@ -1733,6 +1864,7 @@
- #define DISPPLANE_NO_LINE_DOUBLE 0
- #define DISPPLANE_STEREO_POLARITY_FIRST 0
- #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */
- #define DISPPLANE_TILED (1<<10)
- #define DSPAADDR 0x70184
- #define DSPASTRIDE 0x70188
-@@ -1913,6 +2045,9 @@
- #define GTIIR 0x44018
- #define GTIER 0x4401c
-
-+#define DISP_ARB_CTL 0x45000
-+#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
-+
- /* PCH */
-
- /* south display engine interrupt */
-diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
-index 1d04e19..20d4d19 100644
---- a/drivers/gpu/drm/i915/i915_suspend.c
-+++ b/drivers/gpu/drm/i915/i915_suspend.c
-@@ -461,7 +461,7 @@ int i915_save_state(struct drm_device *dev)
-
- /* Clock gating state */
- dev_priv->saveD_STATE = I915_READ(D_STATE);
-- dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
-+ dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
-
- /* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-@@ -588,7 +588,7 @@ int i915_restore_state(struct drm_device *dev)
-
- /* Clock gating state */
- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-- I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
-+ I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
-
- /* Cache mode state */
- I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
-diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
-index f806fcc..1e28c16 100644
---- a/drivers/gpu/drm/i915/intel_bios.c
-+++ b/drivers/gpu/drm/i915/intel_bios.c
-@@ -355,8 +355,14 @@ parse_driver_features(struct drm_i915_private *dev_priv,
- }
-
- driver = find_section(bdb, BDB_DRIVER_FEATURES);
-- if (driver && driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
-+ if (!driver)
-+ return;
-+
-+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
- dev_priv->edp_support = 1;
-+
-+ if (driver->dual_frequency)
-+ dev_priv->render_reclock_avail = true;
- }
-
- /**
-diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
-index 590f81c..88814fa 100644
---- a/drivers/gpu/drm/i915/intel_crt.c
-+++ b/drivers/gpu/drm/i915/intel_crt.c
-@@ -64,6 +64,34 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
- }
-
- I915_WRITE(reg, temp);
-+
-+ if (IS_IGD(dev)) {
-+ if (mode == DRM_MODE_DPMS_OFF) {
-+ /* turn off DAC */
-+ temp = I915_READ(PORT_HOTPLUG_EN);
-+ temp &= ~CRT_EOS_INT_EN;
-+ I915_WRITE(PORT_HOTPLUG_EN, temp);
-+
-+ temp = I915_READ(PORT_HOTPLUG_STAT);
-+ if (temp & CRT_EOS_INT_STATUS)
-+ I915_WRITE(PORT_HOTPLUG_STAT,
-+ CRT_EOS_INT_STATUS);
-+ } else {
-+			/* turn on DAC. The EOS interrupt must be enabled after
-+			 * the DAC is enabled, so enabling it in
-+			 * i915_driver_irq_postinstall() is not a good idea.
-+			 * Wait 12.5ms after the DAC is enabled.
-+			 */
-+ msleep(13);
-+ temp = I915_READ(PORT_HOTPLUG_STAT);
-+ if (temp & CRT_EOS_INT_STATUS)
-+ I915_WRITE(PORT_HOTPLUG_STAT,
-+ CRT_EOS_INT_STATUS);
-+ temp = I915_READ(PORT_HOTPLUG_EN);
-+ temp |= CRT_EOS_INT_EN;
-+ I915_WRITE(PORT_HOTPLUG_EN, temp);
-+ }
-+ }
- }
-
- static int intel_crt_mode_valid(struct drm_connector *connector,
-diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 748ed50..155719f 100644
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -38,6 +38,7 @@
-
- bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
- static void intel_update_watermarks(struct drm_device *dev);
-+static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
-
- typedef struct {
- /* given values */
-@@ -67,6 +68,8 @@ struct intel_limit {
- intel_p2_t p2;
- bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
-+ bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
-+ int, int, intel_clock_t *);
- };
-
- #define I8XX_DOT_MIN 25000
-@@ -261,6 +264,9 @@ static bool
- intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
- static bool
-+intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-+ int target, int refclk, intel_clock_t *best_clock);
-+static bool
- intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
- static bool
-@@ -286,6 +292,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
- .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
- .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
- .find_pll = intel_find_best_PLL,
-+ .find_reduced_pll = intel_find_best_reduced_PLL,
- };
-
- static const intel_limit_t intel_limits_i8xx_lvds = {
-@@ -300,6 +307,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
- .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
- .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
- .find_pll = intel_find_best_PLL,
-+ .find_reduced_pll = intel_find_best_reduced_PLL,
- };
-
- static const intel_limit_t intel_limits_i9xx_sdvo = {
-@@ -314,6 +322,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
- .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
- .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
- .find_pll = intel_find_best_PLL,
-+ .find_reduced_pll = intel_find_best_reduced_PLL,
- };
-
- static const intel_limit_t intel_limits_i9xx_lvds = {
-@@ -331,6 +340,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
- .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
- .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
- .find_pll = intel_find_best_PLL,
-+ .find_reduced_pll = intel_find_best_reduced_PLL,
- };
-
- /* below parameter and function is for G4X Chipset Family*/
-@@ -348,6 +358,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
- .p2_fast = G4X_P2_SDVO_FAST
- },
- .find_pll = intel_g4x_find_best_PLL,
-+ .find_reduced_pll = intel_g4x_find_best_PLL,
- };
-
- static const intel_limit_t intel_limits_g4x_hdmi = {
-@@ -364,6 +375,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
- .p2_fast = G4X_P2_HDMI_DAC_FAST
- },
- .find_pll = intel_g4x_find_best_PLL,
-+ .find_reduced_pll = intel_g4x_find_best_PLL,
- };
-
- static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
-@@ -388,6 +400,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
- .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
- },
- .find_pll = intel_g4x_find_best_PLL,
-+ .find_reduced_pll = intel_g4x_find_best_PLL,
- };
-
- static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
-@@ -412,6 +425,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
- .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
- },
- .find_pll = intel_g4x_find_best_PLL,
-+ .find_reduced_pll = intel_g4x_find_best_PLL,
- };
-
- static const intel_limit_t intel_limits_g4x_display_port = {
-@@ -449,6 +463,7 @@ static const intel_limit_t intel_limits_igd_sdvo = {
- .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
- .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
- .find_pll = intel_find_best_PLL,
-+ .find_reduced_pll = intel_find_best_reduced_PLL,
- };
-
- static const intel_limit_t intel_limits_igd_lvds = {
-@@ -464,6 +479,7 @@ static const intel_limit_t intel_limits_igd_lvds = {
- .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
- .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
- .find_pll = intel_find_best_PLL,
-+ .find_reduced_pll = intel_find_best_reduced_PLL,
- };
-
- static const intel_limit_t intel_limits_igdng_sdvo = {
-@@ -688,15 +704,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-
- memset (best_clock, 0, sizeof (*best_clock));
-
-- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
-- /* m1 is always 0 in IGD */
-- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
-- break;
-- for (clock.n = limit->n.min; clock.n <= limit->n.max;
-- clock.n++) {
-- for (clock.p1 = limit->p1.min;
-- clock.p1 <= limit->p1.max; clock.p1++) {
-+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
-+ clock.m1++) {
-+ for (clock.m2 = limit->m2.min;
-+ clock.m2 <= limit->m2.max; clock.m2++) {
-+ /* m1 is always 0 in IGD */
-+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
-+ break;
-+ for (clock.n = limit->n.min;
-+ clock.n <= limit->n.max; clock.n++) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
-@@ -717,6 +734,46 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- return (err != target);
- }
-
-+
-+static bool
-+intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-+ int target, int refclk, intel_clock_t *best_clock)
-+
-+{
-+ struct drm_device *dev = crtc->dev;
-+ intel_clock_t clock;
-+ int err = target;
-+ bool found = false;
-+
-+ memcpy(&clock, best_clock, sizeof(intel_clock_t));
-+
-+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
-+ /* m1 is always 0 in IGD */
-+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
-+ break;
-+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
-+ clock.n++) {
-+ int this_err;
-+
-+ intel_clock(dev, refclk, &clock);
-+
-+ if (!intel_PLL_is_valid(crtc, &clock))
-+ continue;
-+
-+ this_err = abs(clock.dot - target);
-+ if (this_err < err) {
-+ *best_clock = clock;
-+ err = this_err;
-+ found = true;
-+ }
-+ }
-+ }
-+ }
-+
-+ return found;
-+}
-+
- static bool
- intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-@@ -747,7 +804,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-- /* based on hardware requirment prefere larger m1,m2, p1 */
-+		/* based on hardware requirement prefer larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
-@@ -832,15 +889,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-
- memset(best_clock, 0, sizeof(*best_clock));
- max_n = limit->n.max;
-- /* based on hardware requriment prefer smaller n to precision */
-- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-- /* based on hardware requirment prefere larger m1,m2, p1 */
-- for (clock.m1 = limit->m1.max;
-- clock.m1 >= limit->m1.min; clock.m1--) {
-- for (clock.m2 = limit->m2.max;
-- clock.m2 >= limit->m2.min; clock.m2--) {
-- for (clock.p1 = limit->p1.max;
-- clock.p1 >= limit->p1.min; clock.p1--) {
-+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-+		/* based on hardware requirement prefer smaller n to precision */
-+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
-+			/* based on hardware requirement prefer larger m1,m2 */
-+ for (clock.m1 = limit->m1.max;
-+ clock.m1 >= limit->m1.min; clock.m1--) {
-+ for (clock.m2 = limit->m2.max;
-+ clock.m2 >= limit->m2.min; clock.m2--) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
-@@ -1008,6 +1064,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- dspcntr &= ~DISPPLANE_TILED;
- }
-
-+ if (IS_IGDNG(dev))
-+ /* must disable */
-+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-+
- I915_WRITE(dspcntr_reg, dspcntr);
-
- Start = obj_priv->gtt_offset;
-@@ -1030,8 +1090,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
-
- if (old_fb) {
- intel_fb = to_intel_framebuffer(old_fb);
-+ obj_priv = intel_fb->obj->driver_private;
- i915_gem_object_unpin(intel_fb->obj);
- }
-+ intel_increase_pllclock(crtc, true);
-+
- mutex_unlock(&dev->struct_mutex);
-
- if (!dev->primary->master)
-@@ -1581,6 +1644,8 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
- else
- i9xx_crtc_dpms(crtc, mode);
-
-+ intel_crtc->dpms_mode = mode;
-+
- if (!dev->primary->master)
- return;
-
-@@ -1603,8 +1668,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
- break;
- }
--
-- intel_crtc->dpms_mode = mode;
- }
-
- static void intel_crtc_prepare (struct drm_crtc *crtc)
-@@ -2054,6 +2117,18 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane)
- return size;
- }
-
-+static void g4x_update_wm(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ u32 fw_blc_self = I915_READ(FW_BLC_SELF);
-+
-+ if (i915_powersave)
-+ fw_blc_self |= FW_BLC_SELF_EN;
-+ else
-+ fw_blc_self &= ~FW_BLC_SELF_EN;
-+ I915_WRITE(FW_BLC_SELF, fw_blc_self);
-+}
-+
- static void i965_update_wm(struct drm_device *dev)
- {
- struct drm_i915_private *dev_priv = dev->dev_private;
-@@ -2105,7 +2180,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- cwm = 2;
-
- /* Calc sr entries for one plane configs */
-- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
-+ if (HAS_FW_BLC(dev) && sr_hdisplay &&
-+ (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- const static int sr_latency_ns = 6000;
-
-@@ -2120,8 +2196,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- srwm = total_size - sr_entries;
- if (srwm < 0)
- srwm = 1;
-- if (IS_I9XX(dev))
-- I915_WRITE(FW_BLC_SELF, (srwm & 0x3f));
-+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
- }
-
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
-@@ -2195,9 +2270,6 @@ static void intel_update_watermarks(struct drm_device *dev)
- unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
- int enabled = 0, pixel_size = 0;
-
-- if (DSPARB_HWCONTROL(dev))
-- return;
--
- /* Get the clock config from both planes */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
-@@ -2230,7 +2302,9 @@ static void intel_update_watermarks(struct drm_device *dev)
- else if (IS_IGD(dev))
- igd_disable_cxsr(dev);
-
-- if (IS_I965G(dev))
-+ if (IS_G4X(dev))
-+ g4x_update_wm(dev);
-+ else if (IS_I965G(dev))
- i965_update_wm(dev);
- else if (IS_I9XX(dev) || IS_MOBILE(dev))
- i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
-@@ -2264,9 +2338,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
- int refclk, num_outputs = 0;
-- intel_clock_t clock;
-- u32 dpll = 0, fp = 0, dspcntr, pipeconf;
-- bool ok, is_sdvo = false, is_dvo = false;
-+ intel_clock_t clock, reduced_clock;
-+ u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
-+ bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- bool is_edp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
-@@ -2349,6 +2423,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- return -EINVAL;
- }
-
-+ if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
-+ memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
-+ has_reduced_clock = limit->find_reduced_pll(limit, crtc,
-+ (adjusted_mode->clock*3/4),
-+ refclk,
-+ &reduced_clock);
-+ }
-+
- 	/* SDVO TV has fixed PLL values that depend on its clock range;
- 	   this mirrors the VBIOS setting. */
- if (is_sdvo && is_tv) {
-@@ -2394,10 +2476,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- link_bw, &m_n);
- }
-
-- if (IS_IGD(dev))
-+ if (IS_IGD(dev)) {
- fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
-- else
-+ if (has_reduced_clock)
-+ fp2 = (1 << reduced_clock.n) << 16 |
-+ reduced_clock.m1 << 8 | reduced_clock.m2;
-+ } else {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-+ if (has_reduced_clock)
-+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-+ reduced_clock.m2;
-+ }
-
- if (!IS_IGDNG(dev))
- dpll = DPLL_VGA_MODE_DIS;
-@@ -2426,6 +2515,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- /* also FPA1 */
- if (IS_IGDNG(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-+ if (IS_G4X(dev) && has_reduced_clock)
-+ dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock.p2) {
- case 5:
-@@ -2573,6 +2664,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
- udelay(150);
- }
-
-+ if (is_lvds && has_reduced_clock && i915_powersave) {
-+ I915_WRITE(fp_reg + 4, fp2);
-+ intel_crtc->lowfreq_avail = true;
-+ if (HAS_PIPE_CXSR(dev)) {
-+ DRM_DEBUG("enabling CxSR downclocking\n");
-+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
-+ }
-+ } else {
-+ I915_WRITE(fp_reg + 4, fp);
-+ intel_crtc->lowfreq_avail = false;
-+ if (HAS_PIPE_CXSR(dev)) {
-+ DRM_DEBUG("disabling CxSR downclocking\n");
-+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
-+ }
-+ }
-+
- I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
-@@ -2616,6 +2723,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
-
- intel_wait_for_vblank(dev);
-
-+ if (IS_IGDNG(dev)) {
-+ /* enable address swizzle for tiling buffer */
-+ temp = I915_READ(DISP_ARB_CTL);
-+ I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
-+ }
-+
- I915_WRITE(dspcntr_reg, dspcntr);
-
- /* Flush the plane changes */
-@@ -2769,10 +2882,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+ struct intel_framebuffer *intel_fb;
- int pipe = intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t adder;
-
-+ if (crtc->fb) {
-+ intel_fb = to_intel_framebuffer(crtc->fb);
-+ intel_mark_busy(dev, intel_fb->obj);
-+ }
-+
- if (x < 0) {
- temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
-@@ -3070,12 +3189,319 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- return mode;
- }
-
-+#define GPU_IDLE_TIMEOUT 500 /* ms */
-+
-+/* When this timer fires, we've been idle for a while */
-+static void intel_gpu_idle_timer(unsigned long arg)
-+{
-+ struct drm_device *dev = (struct drm_device *)arg;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ DRM_DEBUG("idle timer fired, downclocking\n");
-+
-+ dev_priv->busy = false;
-+
-+ queue_work(dev_priv->wq, &dev_priv->idle_work);
-+}
-+
-+void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ if (IS_IGDNG(dev))
-+ return;
-+
-+ if (!dev_priv->render_reclock_avail) {
-+ DRM_DEBUG("not reclocking render clock\n");
-+ return;
-+ }
-+
-+ /* Restore render clock frequency to original value */
-+ if (IS_G4X(dev) || IS_I9XX(dev))
-+ pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
-+ else if (IS_I85X(dev))
-+ pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
-+ DRM_DEBUG("increasing render clock frequency\n");
-+
-+ /* Schedule downclock */
-+ if (schedule)
-+ mod_timer(&dev_priv->idle_timer, jiffies +
-+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-+}
-+
-+void intel_decrease_renderclock(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ if (IS_IGDNG(dev))
-+ return;
-+
-+ if (!dev_priv->render_reclock_avail) {
-+ DRM_DEBUG("not reclocking render clock\n");
-+ return;
-+ }
-+
-+ if (IS_G4X(dev)) {
-+ u16 gcfgc;
-+
-+ /* Adjust render clock... */
-+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-+
-+ /* Down to minimum... */
-+ gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
-+ gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-+
-+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-+ } else if (IS_I965G(dev)) {
-+ u16 gcfgc;
-+
-+ /* Adjust render clock... */
-+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-+
-+ /* Down to minimum... */
-+ gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
-+ gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-+
-+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
-+ u16 gcfgc;
-+
-+ /* Adjust render clock... */
-+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-+
-+ /* Down to minimum... */
-+ gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
-+ gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-+
-+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-+ } else if (IS_I915G(dev)) {
-+ u16 gcfgc;
-+
-+ /* Adjust render clock... */
-+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-+
-+ /* Down to minimum... */
-+ gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
-+ gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-+
-+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-+ } else if (IS_I85X(dev)) {
-+ u16 hpllcc;
-+
-+ /* Adjust render clock... */
-+ pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-+
-+ /* Up to maximum... */
-+ hpllcc &= ~GC_CLOCK_CONTROL_MASK;
-+ hpllcc |= GC_CLOCK_133_200;
-+
-+ pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
-+ }
-+ DRM_DEBUG("decreasing render clock frequency\n");
-+}
-+
-+/* Note that no increase function is needed for this - increase_renderclock()
-+ * will also rewrite these bits
-+ */
-+void intel_decrease_displayclock(struct drm_device *dev)
-+{
-+ if (IS_IGDNG(dev))
-+ return;
-+
-+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
-+ IS_I915GM(dev)) {
-+ u16 gcfgc;
-+
-+ /* Adjust render clock... */
-+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-+
-+ /* Down to minimum... */
-+ gcfgc &= ~0xf0;
-+ gcfgc |= 0x80;
-+
-+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-+ }
-+}
-+
-+#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-+
-+static void intel_crtc_idle_timer(unsigned long arg)
-+{
-+ struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
-+ struct drm_crtc *crtc = &intel_crtc->base;
-+ drm_i915_private_t *dev_priv = crtc->dev->dev_private;
-+
-+ DRM_DEBUG("idle timer fired, downclocking\n");
-+
-+ intel_crtc->busy = false;
-+
-+ queue_work(dev_priv->wq, &dev_priv->idle_work);
-+}
-+
-+static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+ int pipe = intel_crtc->pipe;
-+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-+ int dpll = I915_READ(dpll_reg);
-+
-+ if (IS_IGDNG(dev))
-+ return;
-+
-+ if (!dev_priv->lvds_downclock_avail)
-+ return;
-+
-+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-+ DRM_DEBUG("upclocking LVDS\n");
-+
-+ /* Unlock panel regs */
-+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
-+
-+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-+ I915_WRITE(dpll_reg, dpll);
-+ dpll = I915_READ(dpll_reg);
-+ intel_wait_for_vblank(dev);
-+ dpll = I915_READ(dpll_reg);
-+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
-+ DRM_DEBUG("failed to upclock LVDS!\n");
-+
-+ /* ...and lock them again */
-+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
-+ }
-+
-+ /* Schedule downclock */
-+ if (schedule)
-+ mod_timer(&intel_crtc->idle_timer, jiffies +
-+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-+}
-+
-+static void intel_decrease_pllclock(struct drm_crtc *crtc)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-+ int pipe = intel_crtc->pipe;
-+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-+ int dpll = I915_READ(dpll_reg);
-+
-+ if (IS_IGDNG(dev))
-+ return;
-+
-+ if (!dev_priv->lvds_downclock_avail)
-+ return;
-+
-+ /*
-+ * Since this is called by a timer, we should never get here in
-+ * the manual case.
-+ */
-+ if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
-+ DRM_DEBUG("downclocking LVDS\n");
-+
-+ /* Unlock panel regs */
-+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
-+
-+ dpll |= DISPLAY_RATE_SELECT_FPA1;
-+ I915_WRITE(dpll_reg, dpll);
-+ dpll = I915_READ(dpll_reg);
-+ intel_wait_for_vblank(dev);
-+ dpll = I915_READ(dpll_reg);
-+ if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
-+ DRM_DEBUG("failed to downclock LVDS!\n");
-+
-+ /* ...and lock them again */
-+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
-+ }
-+
-+}
-+
-+/**
-+ * intel_idle_update - adjust clocks for idleness
-+ * @work: work struct
-+ *
-+ * Either the GPU or display (or both) went idle. Check the busy status
-+ * here and adjust the CRTC and GPU clocks as necessary.
-+ */
-+static void intel_idle_update(struct work_struct *work)
-+{
-+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-+ idle_work);
-+ struct drm_device *dev = dev_priv->dev;
-+ struct drm_crtc *crtc;
-+ struct intel_crtc *intel_crtc;
-+
-+ if (!i915_powersave)
-+ return;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ /* GPU isn't processing, downclock it. */
-+ if (!dev_priv->busy) {
-+ intel_decrease_renderclock(dev);
-+ intel_decrease_displayclock(dev);
-+ }
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ /* Skip inactive CRTCs */
-+ if (!crtc->fb)
-+ continue;
-+
-+ intel_crtc = to_intel_crtc(crtc);
-+ if (!intel_crtc->busy)
-+ intel_decrease_pllclock(crtc);
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+/**
-+ * intel_mark_busy - mark the GPU and possibly the display busy
-+ * @dev: drm device
-+ * @obj: object we're operating on
-+ *
-+ * Callers can use this function to indicate that the GPU is busy processing
-+ * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
-+ * buffer), we'll also mark the display as busy, so we know to increase its
-+ * clock frequency.
-+ */
-+void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_crtc *crtc = NULL;
-+ struct intel_framebuffer *intel_fb;
-+ struct intel_crtc *intel_crtc;
-+
-+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
-+ return;
-+
-+ dev_priv->busy = true;
-+ intel_increase_renderclock(dev, true);
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ if (!crtc->fb)
-+ continue;
-+
-+ intel_crtc = to_intel_crtc(crtc);
-+ intel_fb = to_intel_framebuffer(crtc->fb);
-+ if (intel_fb->obj == obj) {
-+ if (!intel_crtc->busy) {
-+ /* Non-busy -> busy, upclock */
-+ intel_increase_pllclock(crtc, true);
-+ intel_crtc->busy = true;
-+ } else {
-+ /* Busy -> busy, put off timer */
-+ mod_timer(&intel_crtc->idle_timer, jiffies +
-+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-+ }
-+ }
-+ }
-+}
-+
- static void intel_crtc_destroy(struct drm_crtc *crtc)
- {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-- if (intel_crtc->mode_set.mode)
-- drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode);
- drm_crtc_cleanup(crtc);
- kfree(intel_crtc);
- }
-@@ -3122,15 +3548,10 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
- drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
-- intel_crtc->mode_set.crtc = &intel_crtc->base;
-- intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
-- intel_crtc->mode_set.num_connectors = 0;
--
-- if (i915_fbpercrtc) {
--
-+ intel_crtc->busy = false;
-
--
-- }
-+ setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
-+ (unsigned long)intel_crtc);
- }
-
- int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
-@@ -3138,30 +3559,26 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
-- struct drm_crtc *crtc = NULL;
-- int pipe = -1;
-+ struct drm_mode_object *drmmode_obj;
-+ struct intel_crtc *crtc;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-- if (crtc->base.id == pipe_from_crtc_id->crtc_id) {
-- pipe = intel_crtc->pipe;
-- break;
-- }
-- }
-+ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
-+ DRM_MODE_OBJECT_CRTC);
-
-- if (pipe == -1) {
-+ if (!drmmode_obj) {
- DRM_ERROR("no such CRTC id\n");
- return -EINVAL;
- }
-
-- pipe_from_crtc_id->pipe = pipe;
-+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
-+ pipe_from_crtc_id->pipe = crtc->pipe;
-
-- return 0;
-+ return 0;
- }
-
- struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-@@ -3362,8 +3779,56 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
- .fb_changed = intelfb_probe,
- };
-
-+void intel_init_clock_gating(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+ /*
-+ * Disable clock gating reported to work incorrectly according to the
-+ * specs, but enable as much else as we can.
-+ */
-+ if (IS_G4X(dev)) {
-+ uint32_t dspclk_gate;
-+ I915_WRITE(RENCLK_GATE_D1, 0);
-+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
-+ GS_UNIT_CLOCK_GATE_DISABLE |
-+ CL_UNIT_CLOCK_GATE_DISABLE);
-+ I915_WRITE(RAMCLK_GATE_D, 0);
-+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
-+ OVRUNIT_CLOCK_GATE_DISABLE |
-+ OVCUNIT_CLOCK_GATE_DISABLE;
-+ if (IS_GM45(dev))
-+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
-+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-+ } else if (IS_I965GM(dev)) {
-+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
-+ I915_WRITE(RENCLK_GATE_D2, 0);
-+ I915_WRITE(DSPCLK_GATE_D, 0);
-+ I915_WRITE(RAMCLK_GATE_D, 0);
-+ I915_WRITE16(DEUC, 0);
-+ } else if (IS_I965G(dev)) {
-+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
-+ I965_RCC_CLOCK_GATE_DISABLE |
-+ I965_RCPB_CLOCK_GATE_DISABLE |
-+ I965_ISC_CLOCK_GATE_DISABLE |
-+ I965_FBC_CLOCK_GATE_DISABLE);
-+ I915_WRITE(RENCLK_GATE_D2, 0);
-+ } else if (IS_I9XX(dev)) {
-+ u32 dstate = I915_READ(D_STATE);
-+
-+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
-+ DSTATE_DOT_CLOCK_GATING;
-+ I915_WRITE(D_STATE, dstate);
-+ } else if (IS_I855(dev) || IS_I865G(dev)) {
-+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-+ } else if (IS_I830(dev)) {
-+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-+ }
-+}
-+
- void intel_modeset_init(struct drm_device *dev)
- {
-+ struct drm_i915_private *dev_priv = dev->dev_private;
- int num_pipe;
- int i;
-
-@@ -3398,15 +3863,47 @@ void intel_modeset_init(struct drm_device *dev)
- DRM_DEBUG("%d display pipe%s available.\n",
- num_pipe, num_pipe > 1 ? "s" : "");
-
-+ if (IS_I85X(dev))
-+ pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
-+ else if (IS_I9XX(dev) || IS_G4X(dev))
-+ pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
-+
- for (i = 0; i < num_pipe; i++) {
- intel_crtc_init(dev, i);
- }
-
- intel_setup_outputs(dev);
-+
-+ intel_init_clock_gating(dev);
-+
-+ INIT_WORK(&dev_priv->idle_work, intel_idle_update);
-+ setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
-+ (unsigned long)dev);
- }
-
- void intel_modeset_cleanup(struct drm_device *dev)
- {
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_crtc *crtc;
-+ struct intel_crtc *intel_crtc;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ /* Skip inactive CRTCs */
-+ if (!crtc->fb)
-+ continue;
-+
-+ intel_crtc = to_intel_crtc(crtc);
-+ intel_increase_pllclock(crtc, false);
-+ del_timer_sync(&intel_crtc->idle_timer);
-+ }
-+
-+ intel_increase_renderclock(dev, false);
-+ del_timer_sync(&dev_priv->idle_timer);
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
- drm_mode_config_cleanup(dev);
- }
-
-diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
-index 25aa6fa..b9e47f1 100644
---- a/drivers/gpu/drm/i915/intel_drv.h
-+++ b/drivers/gpu/drm/i915/intel_drv.h
-@@ -116,9 +116,9 @@ struct intel_crtc {
- uint32_t cursor_addr;
- u8 lut_r[256], lut_g[256], lut_b[256];
- int dpms_mode;
-- struct intel_framebuffer *fbdev_fb;
-- /* a mode_set for fbdev users on this crtc */
-- struct drm_mode_set mode_set;
-+ bool busy; /* is scanout buffer being updated frequently? */
-+ struct timer_list idle_timer;
-+ bool lowfreq_avail;
- };
-
- #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-@@ -137,6 +137,7 @@ extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
- extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
- extern void intel_dvo_init(struct drm_device *dev);
- extern void intel_tv_init(struct drm_device *dev);
-+extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
- extern void intel_lvds_init(struct drm_device *dev);
- extern void intel_dp_init(struct drm_device *dev, int dp_reg);
- void
-diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
-index 1d30802..7ba4a23 100644
---- a/drivers/gpu/drm/i915/intel_fb.c
-+++ b/drivers/gpu/drm/i915/intel_fb.c
-@@ -39,339 +39,34 @@
- #include "drmP.h"
- #include "drm.h"
- #include "drm_crtc.h"
-+#include "drm_fb_helper.h"
- #include "intel_drv.h"
- #include "i915_drm.h"
- #include "i915_drv.h"
-
- struct intelfb_par {
-- struct drm_device *dev;
-- struct drm_display_mode *our_mode;
-+ struct drm_fb_helper helper;
- struct intel_framebuffer *intel_fb;
-- int crtc_count;
-- /* crtc currently bound to this */
-- uint32_t crtc_ids[2];
-+ struct drm_display_mode *our_mode;
- };
-
--static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
-- unsigned blue, unsigned transp,
-- struct fb_info *info)
--{
-- struct intelfb_par *par = info->par;
-- struct drm_device *dev = par->dev;
-- struct drm_crtc *crtc;
-- int i;
--
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-- struct drm_mode_set *modeset = &intel_crtc->mode_set;
-- struct drm_framebuffer *fb = modeset->fb;
--
-- for (i = 0; i < par->crtc_count; i++)
-- if (crtc->base.id == par->crtc_ids[i])
-- break;
--
-- if (i == par->crtc_count)
-- continue;
--
--
-- if (regno > 255)
-- return 1;
--
-- if (fb->depth == 8) {
-- intel_crtc_fb_gamma_set(crtc, red, green, blue, regno);
-- return 0;
-- }
--
-- if (regno < 16) {
-- switch (fb->depth) {
-- case 15:
-- fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
-- ((green & 0xf800) >> 6) |
-- ((blue & 0xf800) >> 11);
-- break;
-- case 16:
-- fb->pseudo_palette[regno] = (red & 0xf800) |
-- ((green & 0xfc00) >> 5) |
-- ((blue & 0xf800) >> 11);
-- break;
-- case 24:
-- case 32:
-- fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
-- (green & 0xff00) |
-- ((blue & 0xff00) >> 8);
-- break;
-- }
-- }
-- }
-- return 0;
--}
--
--static int intelfb_check_var(struct fb_var_screeninfo *var,
-- struct fb_info *info)
--{
-- struct intelfb_par *par = info->par;
-- struct intel_framebuffer *intel_fb = par->intel_fb;
-- struct drm_framebuffer *fb = &intel_fb->base;
-- int depth;
--
-- if (var->pixclock == -1 || !var->pixclock)
-- return -EINVAL;
--
-- /* Need to resize the fb object !!! */
-- if (var->xres > fb->width || var->yres > fb->height) {
-- DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height);
-- DRM_ERROR("Need resizing code.\n");
-- return -EINVAL;
-- }
--
-- switch (var->bits_per_pixel) {
-- case 16:
-- depth = (var->green.length == 6) ? 16 : 15;
-- break;
-- case 32:
-- depth = (var->transp.length > 0) ? 32 : 24;
-- break;
-- default:
-- depth = var->bits_per_pixel;
-- break;
-- }
--
-- switch (depth) {
-- case 8:
-- var->red.offset = 0;
-- var->green.offset = 0;
-- var->blue.offset = 0;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 0;
-- var->transp.offset = 0;
-- break;
-- case 15:
-- var->red.offset = 10;
-- var->green.offset = 5;
-- var->blue.offset = 0;
-- var->red.length = 5;
-- var->green.length = 5;
-- var->blue.length = 5;
-- var->transp.length = 1;
-- var->transp.offset = 15;
-- break;
-- case 16:
-- var->red.offset = 11;
-- var->green.offset = 5;
-- var->blue.offset = 0;
-- var->red.length = 5;
-- var->green.length = 6;
-- var->blue.length = 5;
-- var->transp.length = 0;
-- var->transp.offset = 0;
-- break;
-- case 24:
-- var->red.offset = 16;
-- var->green.offset = 8;
-- var->blue.offset = 0;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 0;
-- var->transp.offset = 0;
-- break;
-- case 32:
-- var->red.offset = 16;
-- var->green.offset = 8;
-- var->blue.offset = 0;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 8;
-- var->transp.offset = 24;
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--/* this will let fbcon do the mode init */
--/* FIXME: take mode config lock? */
--static int intelfb_set_par(struct fb_info *info)
--{
-- struct intelfb_par *par = info->par;
-- struct drm_device *dev = par->dev;
-- struct fb_var_screeninfo *var = &info->var;
-- int i;
--
-- DRM_DEBUG("%d %d\n", var->xres, var->pixclock);
--
-- if (var->pixclock != -1) {
--
-- DRM_ERROR("PIXEL CLOCK SET\n");
-- return -EINVAL;
-- } else {
-- struct drm_crtc *crtc;
-- int ret;
--
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--
-- for (i = 0; i < par->crtc_count; i++)
-- if (crtc->base.id == par->crtc_ids[i])
-- break;
--
-- if (i == par->crtc_count)
-- continue;
--
-- if (crtc->fb == intel_crtc->mode_set.fb) {
-- mutex_lock(&dev->mode_config.mutex);
-- ret = crtc->funcs->set_config(&intel_crtc->mode_set);
-- mutex_unlock(&dev->mode_config.mutex);
-- if (ret)
-- return ret;
-- }
-- }
-- return 0;
-- }
--}
--
--static int intelfb_pan_display(struct fb_var_screeninfo *var,
-- struct fb_info *info)
--{
-- struct intelfb_par *par = info->par;
-- struct drm_device *dev = par->dev;
-- struct drm_mode_set *modeset;
-- struct drm_crtc *crtc;
-- struct intel_crtc *intel_crtc;
-- int ret = 0;
-- int i;
--
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- for (i = 0; i < par->crtc_count; i++)
-- if (crtc->base.id == par->crtc_ids[i])
-- break;
--
-- if (i == par->crtc_count)
-- continue;
--
-- intel_crtc = to_intel_crtc(crtc);
-- modeset = &intel_crtc->mode_set;
--
-- modeset->x = var->xoffset;
-- modeset->y = var->yoffset;
--
-- if (modeset->num_connectors) {
-- mutex_lock(&dev->mode_config.mutex);
-- ret = crtc->funcs->set_config(modeset);
-- mutex_unlock(&dev->mode_config.mutex);
-- if (!ret) {
-- info->var.xoffset = var->xoffset;
-- info->var.yoffset = var->yoffset;
-- }
-- }
-- }
--
-- return ret;
--}
--
--static void intelfb_on(struct fb_info *info)
--{
-- struct intelfb_par *par = info->par;
-- struct drm_device *dev = par->dev;
-- struct drm_crtc *crtc;
-- struct drm_encoder *encoder;
-- int i;
--
-- /*
-- * For each CRTC in this fb, find all associated encoders
-- * and turn them off, then turn off the CRTC.
-- */
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
--
-- for (i = 0; i < par->crtc_count; i++)
-- if (crtc->base.id == par->crtc_ids[i])
-- break;
--
-- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
--
-- /* Found a CRTC on this fb, now find encoders */
-- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-- if (encoder->crtc == crtc) {
-- struct drm_encoder_helper_funcs *encoder_funcs;
-- encoder_funcs = encoder->helper_private;
-- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-- }
-- }
-- }
--}
--
--static void intelfb_off(struct fb_info *info, int dpms_mode)
--{
-- struct intelfb_par *par = info->par;
-- struct drm_device *dev = par->dev;
-- struct drm_crtc *crtc;
-- struct drm_encoder *encoder;
-- int i;
--
-- /*
-- * For each CRTC in this fb, find all associated encoders
-- * and turn them off, then turn off the CRTC.
-- */
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
--
-- for (i = 0; i < par->crtc_count; i++)
-- if (crtc->base.id == par->crtc_ids[i])
-- break;
--
-- /* Found a CRTC on this fb, now find encoders */
-- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-- if (encoder->crtc == crtc) {
-- struct drm_encoder_helper_funcs *encoder_funcs;
-- encoder_funcs = encoder->helper_private;
-- encoder_funcs->dpms(encoder, dpms_mode);
-- }
-- }
-- if (dpms_mode == DRM_MODE_DPMS_OFF)
-- crtc_funcs->dpms(crtc, dpms_mode);
-- }
--}
--
--static int intelfb_blank(int blank, struct fb_info *info)
--{
-- switch (blank) {
-- case FB_BLANK_UNBLANK:
-- intelfb_on(info);
-- break;
-- case FB_BLANK_NORMAL:
-- intelfb_off(info, DRM_MODE_DPMS_STANDBY);
-- break;
-- case FB_BLANK_HSYNC_SUSPEND:
-- intelfb_off(info, DRM_MODE_DPMS_STANDBY);
-- break;
-- case FB_BLANK_VSYNC_SUSPEND:
-- intelfb_off(info, DRM_MODE_DPMS_SUSPEND);
-- break;
-- case FB_BLANK_POWERDOWN:
-- intelfb_off(info, DRM_MODE_DPMS_OFF);
-- break;
-- }
-- return 0;
--}
--
- static struct fb_ops intelfb_ops = {
- .owner = THIS_MODULE,
-- .fb_check_var = intelfb_check_var,
-- .fb_set_par = intelfb_set_par,
-- .fb_setcolreg = intelfb_setcolreg,
-+ .fb_check_var = drm_fb_helper_check_var,
-+ .fb_set_par = drm_fb_helper_set_par,
-+ .fb_setcolreg = drm_fb_helper_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-- .fb_pan_display = intelfb_pan_display,
-- .fb_blank = intelfb_blank,
-+ .fb_pan_display = drm_fb_helper_pan_display,
-+ .fb_blank = drm_fb_helper_blank,
- };
-
-+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
-+ .gamma_set = intel_crtc_fb_gamma_set,
-+};
-+
-+
- /**
-  * Currently it is assumed that the old framebuffer is reused.
- *
-@@ -412,25 +107,10 @@ int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
- }
- EXPORT_SYMBOL(intelfb_resize);
-
--static struct drm_mode_set kernelfb_mode;
--
--static int intelfb_panic(struct notifier_block *n, unsigned long ununsed,
-- void *panic_str)
--{
-- DRM_ERROR("panic occurred, switching back to text console\n");
--
-- intelfb_restore();
-- return 0;
--}
--
--static struct notifier_block paniced = {
-- .notifier_call = intelfb_panic,
--};
--
- static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height,
-- struct intel_framebuffer **intel_fb_p)
-+ struct drm_framebuffer **fb_p)
- {
- struct fb_info *info;
- struct intelfb_par *par;
-@@ -479,7 +159,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- intel_fb = to_intel_framebuffer(fb);
-- *intel_fb_p = intel_fb;
-+ *fb_p = fb;
-
- info = framebuffer_alloc(sizeof(struct intelfb_par), device);
- if (!info) {
-@@ -489,21 +169,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
-
- par = info->par;
-
-+ par->helper.funcs = &intel_fb_helper_funcs;
-+ par->helper.dev = dev;
-+ ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
-+ INTELFB_CONN_LIMIT);
-+ if (ret)
-+ goto out_unref;
-+
- strcpy(info->fix.id, "inteldrmfb");
-- info->fix.type = FB_TYPE_PACKED_PIXELS;
-- info->fix.visual = FB_VISUAL_TRUECOLOR;
-- info->fix.type_aux = 0;
-- info->fix.xpanstep = 1; /* doing it in hw */
-- info->fix.ypanstep = 1; /* doing it in hw */
-- info->fix.ywrapstep = 0;
-- info->fix.accel = FB_ACCEL_I830;
-- info->fix.type_aux = 0;
-
- info->flags = FBINFO_DEFAULT;
-
- info->fbops = &intelfb_ops;
-
-- info->fix.line_length = fb->pitch;
-
- /* setup aperture base/size for vesafb takeover */
- info->aperture_base = dev->mode_config.fb_base;
-@@ -527,18 +205,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
-
- // memset(info->screen_base, 0, size);
-
-- info->pseudo_palette = fb->pseudo_palette;
-- info->var.xres_virtual = fb->width;
-- info->var.yres_virtual = fb->height;
-- info->var.bits_per_pixel = fb->bits_per_pixel;
-- info->var.xoffset = 0;
-- info->var.yoffset = 0;
-- info->var.activate = FB_ACTIVATE_NOW;
-- info->var.height = -1;
-- info->var.width = -1;
--
-- info->var.xres = fb_width;
-- info->var.yres = fb_height;
-+ drm_fb_helper_fill_fix(info, fb->pitch);
-+ drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
-
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
-@@ -550,64 +218,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
-
-- switch(fb->depth) {
-- case 8:
-- info->var.red.offset = 0;
-- info->var.green.offset = 0;
-- info->var.blue.offset = 0;
-- info->var.red.length = 8; /* 8bit DAC */
-- info->var.green.length = 8;
-- info->var.blue.length = 8;
-- info->var.transp.offset = 0;
-- info->var.transp.length = 0;
-- break;
-- case 15:
-- info->var.red.offset = 10;
-- info->var.green.offset = 5;
-- info->var.blue.offset = 0;
-- info->var.red.length = 5;
-- info->var.green.length = 5;
-- info->var.blue.length = 5;
-- info->var.transp.offset = 15;
-- info->var.transp.length = 1;
-- break;
-- case 16:
-- info->var.red.offset = 11;
-- info->var.green.offset = 5;
-- info->var.blue.offset = 0;
-- info->var.red.length = 5;
-- info->var.green.length = 6;
-- info->var.blue.length = 5;
-- info->var.transp.offset = 0;
-- break;
-- case 24:
-- info->var.red.offset = 16;
-- info->var.green.offset = 8;
-- info->var.blue.offset = 0;
-- info->var.red.length = 8;
-- info->var.green.length = 8;
-- info->var.blue.length = 8;
-- info->var.transp.offset = 0;
-- info->var.transp.length = 0;
-- break;
-- case 32:
-- info->var.red.offset = 16;
-- info->var.green.offset = 8;
-- info->var.blue.offset = 0;
-- info->var.red.length = 8;
-- info->var.green.length = 8;
-- info->var.blue.length = 8;
-- info->var.transp.offset = 24;
-- info->var.transp.length = 8;
-- break;
-- default:
-- break;
-- }
--
- fb->fbdev = info;
-
- par->intel_fb = intel_fb;
-- par->dev = dev;
-
- 	/* To allow resizing without swapping buffers */
- DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
-@@ -625,307 +238,12 @@ out:
- return ret;
- }
-
--static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc)
--{
-- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-- struct intel_framebuffer *intel_fb;
-- struct drm_framebuffer *fb;
-- struct drm_connector *connector;
-- struct fb_info *info;
-- struct intelfb_par *par;
-- struct drm_mode_set *modeset;
-- unsigned int width, height;
-- int new_fb = 0;
-- int ret, i, conn_count;
--
-- if (!drm_helper_crtc_in_use(crtc))
-- return 0;
--
-- if (!crtc->desired_mode)
-- return 0;
--
-- width = crtc->desired_mode->hdisplay;
-- height = crtc->desired_mode->vdisplay;
--
-- /* is there an fb bound to this crtc already */
-- if (!intel_crtc->mode_set.fb) {
-- ret = intelfb_create(dev, width, height, width, height, &intel_fb);
-- if (ret)
-- return -EINVAL;
-- new_fb = 1;
-- } else {
-- fb = intel_crtc->mode_set.fb;
-- intel_fb = to_intel_framebuffer(fb);
-- if ((intel_fb->base.width < width) || (intel_fb->base.height < height))
-- return -EINVAL;
-- }
--
-- info = intel_fb->base.fbdev;
-- par = info->par;
--
-- modeset = &intel_crtc->mode_set;
-- modeset->fb = &intel_fb->base;
-- conn_count = 0;
-- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-- if (connector->encoder)
-- if (connector->encoder->crtc == modeset->crtc) {
-- modeset->connectors[conn_count] = connector;
-- conn_count++;
-- if (conn_count > INTELFB_CONN_LIMIT)
-- BUG();
-- }
-- }
--
-- for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
-- modeset->connectors[i] = NULL;
--
-- par->crtc_ids[0] = crtc->base.id;
--
-- modeset->num_connectors = conn_count;
-- if (modeset->crtc->desired_mode) {
-- if (modeset->mode)
-- drm_mode_destroy(dev, modeset->mode);
-- modeset->mode = drm_mode_duplicate(dev,
-- modeset->crtc->desired_mode);
-- }
--
-- par->crtc_count = 1;
--
-- if (new_fb) {
-- info->var.pixclock = -1;
-- if (register_framebuffer(info) < 0)
-- return -EINVAL;
-- } else
-- intelfb_set_par(info);
--
-- DRM_INFO("fb%d: %s frame buffer device\n", info->node,
-- info->fix.id);
--
-- /* Switch back to kernel console on panic */
-- kernelfb_mode = *modeset;
-- atomic_notifier_chain_register(&panic_notifier_list, &paniced);
-- DRM_DEBUG("registered panic notifier\n");
--
-- return 0;
--}
--
--static int intelfb_multi_fb_probe(struct drm_device *dev)
--{
--
-- struct drm_crtc *crtc;
-- int ret = 0;
--
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- ret = intelfb_multi_fb_probe_crtc(dev, crtc);
-- if (ret)
-- return ret;
-- }
-- return ret;
--}
--
--static int intelfb_single_fb_probe(struct drm_device *dev)
--{
-- struct drm_crtc *crtc;
-- struct drm_connector *connector;
-- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
-- unsigned int surface_width = 0, surface_height = 0;
-- int new_fb = 0;
-- int crtc_count = 0;
-- int ret, i, conn_count = 0;
-- struct intel_framebuffer *intel_fb;
-- struct fb_info *info;
-- struct intelfb_par *par;
-- struct drm_mode_set *modeset = NULL;
--
-- DRM_DEBUG("\n");
--
-- /* Get a count of crtcs now in use and new min/maxes width/heights */
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- if (!drm_helper_crtc_in_use(crtc))
-- continue;
--
-- crtc_count++;
-- if (!crtc->desired_mode)
-- continue;
--
-- /* Smallest mode determines console size... */
-- if (crtc->desired_mode->hdisplay < fb_width)
-- fb_width = crtc->desired_mode->hdisplay;
--
-- if (crtc->desired_mode->vdisplay < fb_height)
-- fb_height = crtc->desired_mode->vdisplay;
--
-- /* ... but largest for memory allocation dimensions */
-- if (crtc->desired_mode->hdisplay > surface_width)
-- surface_width = crtc->desired_mode->hdisplay;
--
-- if (crtc->desired_mode->vdisplay > surface_height)
-- surface_height = crtc->desired_mode->vdisplay;
-- }
--
-- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
-- /* hmm everyone went away - assume VGA cable just fell out
-- and will come back later. */
-- DRM_DEBUG("no CRTCs available?\n");
-- return 0;
-- }
--
--//fail
-- /* Find the fb for our new config */
-- if (list_empty(&dev->mode_config.fb_kernel_list)) {
-- DRM_DEBUG("creating new fb (console size %dx%d, "
-- "buffer size %dx%d)\n", fb_width, fb_height,
-- surface_width, surface_height);
-- ret = intelfb_create(dev, fb_width, fb_height, surface_width,
-- surface_height, &intel_fb);
-- if (ret)
-- return -EINVAL;
-- new_fb = 1;
-- } else {
-- struct drm_framebuffer *fb;
--
-- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
-- struct drm_framebuffer, filp_head);
-- intel_fb = to_intel_framebuffer(fb);
--
-- /* if someone hotplugs something bigger than we have already
-- * allocated, we are pwned. As really we can't resize an
-- * fbdev that is in the wild currently due to fbdev not really
-- * being designed for the lower layers moving stuff around
-- * under it.
-- * - so in the grand style of things - punt.
-- */
-- if ((fb->width < surface_width) ||
-- (fb->height < surface_height)) {
-- DRM_ERROR("fb not large enough for console\n");
-- return -EINVAL;
-- }
-- }
--// fail
--
-- info = intel_fb->base.fbdev;
-- par = info->par;
--
-- crtc_count = 0;
-- /*
-- * For each CRTC, set up the connector list for the CRTC's mode
-- * set configuration.
-- */
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--
-- modeset = &intel_crtc->mode_set;
-- modeset->fb = &intel_fb->base;
-- conn_count = 0;
-- list_for_each_entry(connector, &dev->mode_config.connector_list,
-- head) {
-- if (!connector->encoder)
-- continue;
--
-- if(connector->encoder->crtc == modeset->crtc) {
-- modeset->connectors[conn_count++] = connector;
-- if (conn_count > INTELFB_CONN_LIMIT)
-- BUG();
-- }
-- }
--
-- /* Zero out remaining connector pointers */
-- for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
-- modeset->connectors[i] = NULL;
--
-- par->crtc_ids[crtc_count++] = crtc->base.id;
--
-- modeset->num_connectors = conn_count;
-- if (modeset->crtc->desired_mode) {
-- if (modeset->mode)
-- drm_mode_destroy(dev, modeset->mode);
-- modeset->mode = drm_mode_duplicate(dev,
-- modeset->crtc->desired_mode);
-- }
-- }
-- par->crtc_count = crtc_count;
--
-- if (new_fb) {
-- info->var.pixclock = -1;
-- if (register_framebuffer(info) < 0)
-- return -EINVAL;
-- } else
-- intelfb_set_par(info);
--
-- DRM_INFO("fb%d: %s frame buffer device\n", info->node,
-- info->fix.id);
--
-- /* Switch back to kernel console on panic */
-- kernelfb_mode = *modeset;
-- atomic_notifier_chain_register(&panic_notifier_list, &paniced);
-- DRM_DEBUG("registered panic notifier\n");
--
-- return 0;
--}
--
--/**
-- * intelfb_restore - restore the framebuffer console (kernel) config
-- *
-- * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
-- */
--void intelfb_restore(void)
--{
-- int ret;
-- if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) {
-- DRM_ERROR("Failed to restore crtc configuration: %d\n",
-- ret);
-- }
--}
--
--static void intelfb_restore_work_fn(struct work_struct *ignored)
--{
-- intelfb_restore();
--}
--static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn);
--
--static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
--{
-- schedule_work(&intelfb_restore_work);
--}
--
--static struct sysrq_key_op sysrq_intelfb_restore_op = {
-- .handler = intelfb_sysrq,
-- .help_msg = "force-fb(V)",
-- .action_msg = "Restore framebuffer console",
--};
--
- int intelfb_probe(struct drm_device *dev)
- {
- int ret;
-
- DRM_DEBUG("\n");
--
-- /* something has changed in the lower levels of hell - deal with it
-- here */
--
-- /* two modes : a) 1 fb to rule all crtcs.
-- b) one fb per crtc.
-- two actions 1) new connected device
-- 2) device removed.
-- case a/1 : if the fb surface isn't big enough - resize the surface fb.
-- if the fb size isn't big enough - resize fb into surface.
-- if everything big enough configure the new crtc/etc.
-- case a/2 : undo the configuration
-- possibly resize down the fb to fit the new configuration.
-- case b/1 : see if it is on a new crtc - setup a new fb and add it.
-- case b/2 : teardown the new fb.
-- */
--
-- /* mode a first */
-- /* search for an fb */
-- if (i915_fbpercrtc == 1) {
-- ret = intelfb_multi_fb_probe(dev);
-- } else {
-- ret = intelfb_single_fb_probe(dev);
-- }
--
-- register_sysrq_key('v', &sysrq_intelfb_restore_op);
--
-+ ret = drm_fb_helper_single_fb_probe(dev, intelfb_create);
- return ret;
- }
- EXPORT_SYMBOL(intelfb_probe);
-@@ -940,13 +258,14 @@ int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
- info = fb->fbdev;
-
- if (info) {
-+ struct intelfb_par *par = info->par;
- unregister_framebuffer(info);
- iounmap(info->screen_base);
-+ if (info->par)
-+ drm_fb_helper_free(&par->helper);
- framebuffer_release(info);
- }
-
-- atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
-- memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set));
- return 0;
- }
- EXPORT_SYMBOL(intelfb_remove);
-diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
-index 62b8bea..c7eab72 100644
---- a/drivers/gpu/drm/i915/intel_i2c.c
-+++ b/drivers/gpu/drm/i915/intel_i2c.c
-@@ -42,11 +42,11 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
- if (!IS_IGD(dev))
- return;
- if (enable)
-- I915_WRITE(CG_2D_DIS,
-- I915_READ(CG_2D_DIS) | DPCUNIT_CLOCK_GATE_DISABLE);
-+ I915_WRITE(DSPCLK_GATE_D,
-+ I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
- else
-- I915_WRITE(CG_2D_DIS,
-- I915_READ(CG_2D_DIS) & (~DPCUNIT_CLOCK_GATE_DISABLE));
-+ I915_WRITE(DSPCLK_GATE_D,
-+ I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
- }
-
- /*
-diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
-index 8df02ef..dafc0da 100644
---- a/drivers/gpu/drm/i915/intel_lvds.c
-+++ b/drivers/gpu/drm/i915/intel_lvds.c
-@@ -38,16 +38,6 @@
- #include "i915_drv.h"
- #include <linux/acpi.h>
-
--#define I915_LVDS "i915_lvds"
--
--/*
-- * the following four scaling options are defined.
-- * #define DRM_MODE_SCALE_NON_GPU 0
-- * #define DRM_MODE_SCALE_FULLSCREEN 1
-- * #define DRM_MODE_SCALE_NO_SCALE 2
-- * #define DRM_MODE_SCALE_ASPECT 3
-- */
--
- /* Private structure for the integrated LVDS support */
- struct intel_lvds_priv {
- int fitting_mode;
-@@ -336,7 +326,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- I915_WRITE(BCLRPAT_B, 0);
-
- switch (lvds_priv->fitting_mode) {
-- case DRM_MODE_SCALE_NO_SCALE:
-+ case DRM_MODE_SCALE_CENTER:
- /*
- * For centered modes, we have to calculate border widths &
- * heights and modify the values programmed into the CRTC.
-@@ -672,9 +662,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
- connector->encoder) {
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
-- if (value == DRM_MODE_SCALE_NON_GPU) {
-- DRM_DEBUG_KMS(I915_LVDS,
-- "non_GPU property is unsupported\n");
-+ if (value == DRM_MODE_SCALE_NONE) {
-+ DRM_DEBUG_KMS("no scaling not supported\n");
- return 0;
- }
- if (lvds_priv->fitting_mode == value) {
-@@ -731,8 +720,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
-
- static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
- {
-- DRM_DEBUG_KMS(I915_LVDS,
-- "Skipping LVDS initialization for %s\n", id->ident);
-+ DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
- return 1;
- }
-
-@@ -1027,7 +1015,7 @@ out:
- return;
-
- failed:
-- DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
-+ DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
- drm_connector_cleanup(connector);
-diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
-index d3b74ba..0bf28ef 100644
---- a/drivers/gpu/drm/i915/intel_sdvo.c
-+++ b/drivers/gpu/drm/i915/intel_sdvo.c
-@@ -37,7 +37,19 @@
- #include "intel_sdvo_regs.h"
-
- #undef SDVO_DEBUG
--#define I915_SDVO "i915_sdvo"
-+
-+static char *tv_format_names[] = {
-+ "NTSC_M" , "NTSC_J" , "NTSC_443",
-+ "PAL_B" , "PAL_D" , "PAL_G" ,
-+ "PAL_H" , "PAL_I" , "PAL_M" ,
-+ "PAL_N" , "PAL_NC" , "PAL_60" ,
-+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
-+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
-+ "SECAM_60"
-+};
-+
-+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-+
- struct intel_sdvo_priv {
- u8 slave_addr;
-
-@@ -71,6 +83,15 @@ struct intel_sdvo_priv {
- */
- bool is_tv;
-
-+ /* This is for current tv format name */
-+ char *tv_format_name;
-+
-+ /* This contains all current supported TV format */
-+ char *tv_format_supported[TV_FORMAT_NUM];
-+ int format_supported_num;
-+ struct drm_property *tv_format_property;
-+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-+
- /**
- * This is set if we treat the device as HDMI, instead of DVI.
- */
-@@ -97,14 +118,6 @@ struct intel_sdvo_priv {
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
-- /**
-- * Current selected TV format.
-- *
-- * This is stored in the same structure that's passed to the device, for
-- * convenience.
-- */
-- struct intel_sdvo_tv_format tv_format;
--
- /*
- * supported encoding mode, used to determine whether HDMI is
- * supported
-@@ -114,6 +127,9 @@ struct intel_sdvo_priv {
- /* DDC bus used by this SDVO output */
- uint8_t ddc_bus;
-
-+ /* Mac mini hack -- use the same DDC as the analog connector */
-+ struct i2c_adapter *analog_ddc_bus;
-+
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
-@@ -188,7 +204,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
- return true;
- }
-
-- DRM_DEBUG("i2c transfer returned %d\n", ret);
-+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
- return false;
- }
-
-@@ -298,7 +314,7 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int i;
-
-- DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
-+ DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(sdvo_priv), cmd);
- for (i = 0; i < args_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
-@@ -351,7 +367,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int i;
-
-- DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
-+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
- for (i = 0; i < response_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
- for (; i < 8; i++)
-@@ -668,10 +684,10 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
- status = intel_sdvo_read_response(intel_output, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
-- DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
-+ DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
-- DRM_DEBUG("Current clock rate multiplier: %d\n", response);
-+ DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-@@ -945,23 +961,28 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
-
- static void intel_sdvo_set_tv_format(struct intel_output *output)
- {
-+
-+ struct intel_sdvo_tv_format format;
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
-- struct intel_sdvo_tv_format *format, unset;
-- u8 status;
-+ uint32_t format_map, i;
-+ uint8_t status;
-
-- format = &sdvo_priv->tv_format;
-- memset(&unset, 0, sizeof(unset));
-- if (memcmp(format, &unset, sizeof(*format))) {
-- DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n",
-- SDVO_NAME(sdvo_priv));
-- format->ntsc_m = 1;
-- intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, format,
-- sizeof(*format));
-- status = intel_sdvo_read_response(output, NULL, 0);
-- if (status != SDVO_CMD_STATUS_SUCCESS)
-- DRM_DEBUG("%s: Failed to set TV format\n",
-- SDVO_NAME(sdvo_priv));
-- }
-+ for (i = 0; i < TV_FORMAT_NUM; i++)
-+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
-+ break;
-+
-+ format_map = 1 << i;
-+ memset(&format, 0, sizeof(format));
-+ memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
-+ sizeof(format) : sizeof(format_map));
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map,
-+ sizeof(format));
-+
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ DRM_DEBUG("%s: Failed to set TV format\n",
-+ SDVO_NAME(sdvo_priv));
- }
-
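The rewritten set_tv_format path above replaces the stored intel_sdvo_tv_format struct with a name table plus a one-hot bitmask, and copies whichever of the mask or the command payload is smaller. Below is a minimal standalone sketch of that encode step; the tv_format_cmd payload and the shortened name table are invented for illustration, not the driver's real layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical fixed-size "set TV format" payload; only its size behaviour
 * mirrors the real SDVO command block. */
struct tv_format_cmd {
    uint8_t bytes[6];
};

static const char *const tv_format_names[] = {
    "NTSC_M", "NTSC_J", "PAL_B", "PAL_D", "SECAM_B",
};
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))

int main(void)
{
    const char *current = tv_format_names[2];   /* e.g. "PAL_B" */
    struct tv_format_cmd cmd;
    uint32_t format_map;
    size_t i;

    /* Look the current format up by pointer identity, the same way the
     * patch compares the stored tv_format_name against the table. */
    for (i = 0; i < TV_FORMAT_NUM; i++)
        if (tv_format_names[i] == current)
            break;

    format_map = 1u << i;                       /* one-hot selection bit */

    /* Copy whichever side is smaller so a 4-byte mask can never overrun
     * (or be overrun by) the fixed-size payload. */
    memset(&cmd, 0, sizeof(cmd));
    memcpy(&cmd, &format_map,
           sizeof(format_map) > sizeof(cmd) ?
           sizeof(cmd) : sizeof(format_map));

    printf("format %s -> bit %zu, payload starts %02x %02x\n",
           current, i, (unsigned)cmd.bytes[0], (unsigned)cmd.bytes[1]);
    return 0;
}

Copying min(sizeof mask, sizeof payload) keeps the memcpy safe whichever side is narrower, which is exactly why the hunk above spells out the ternary.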
- static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
-@@ -1230,8 +1251,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
- * a given it the status is a success, we succeeded.
- */
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
-- DRM_DEBUG("First %s output reported failure to sync\n",
-- SDVO_NAME(sdvo_priv));
-+ DRM_DEBUG_KMS("First %s output reported failure to "
-+ "sync\n", SDVO_NAME(sdvo_priv));
- }
-
- if (0)
-@@ -1326,8 +1347,8 @@ static void intel_sdvo_restore(struct drm_connector *connector)
- intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
-- DRM_DEBUG("First %s output reported failure to sync\n",
-- SDVO_NAME(sdvo_priv));
-+ DRM_DEBUG_KMS("First %s output reported failure to "
-+ "sync\n", SDVO_NAME(sdvo_priv));
- }
-
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
-@@ -1405,7 +1426,7 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
- u8 response[2];
- u8 status;
- struct intel_output *intel_output;
-- DRM_DEBUG("\n");
-+ DRM_DEBUG_KMS("\n");
-
- if (!connector)
- return 0;
-@@ -1478,6 +1499,36 @@ intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
- return (caps > 1);
- }
-
-+static struct drm_connector *
-+intel_find_analog_connector(struct drm_device *dev)
-+{
-+ struct drm_connector *connector;
-+ struct intel_output *intel_output;
-+
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ intel_output = to_intel_output(connector);
-+ if (intel_output->type == INTEL_OUTPUT_ANALOG)
-+ return connector;
-+ }
-+ return NULL;
-+}
-+
-+static int
-+intel_analog_is_connected(struct drm_device *dev)
-+{
-+ struct drm_connector *analog_connector;
-+ analog_connector = intel_find_analog_connector(dev);
-+
-+ if (!analog_connector)
-+ return false;
-+
-+ if (analog_connector->funcs->detect(analog_connector) ==
-+ connector_status_disconnected)
-+ return false;
-+
-+ return true;
-+}
-+
- enum drm_connector_status
- intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
- {
-@@ -1488,6 +1539,15 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
-
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
-+
-+ /* when there is no edid and no monitor is connected with VGA
-+ * port, try to use the CRT ddc to read the EDID for DVI-connector
-+ */
-+ if (edid == NULL &&
-+ sdvo_priv->analog_ddc_bus &&
-+ !intel_analog_is_connected(intel_output->base.dev))
-+ edid = drm_get_edid(&intel_output->base,
-+ sdvo_priv->analog_ddc_bus);
- if (edid != NULL) {
- /* Don't report the output as connected if it's a DVI-I
- * connector with a non-digital EDID coming out.
-@@ -1516,10 +1576,11 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-
-- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
-+ intel_sdvo_write_cmd(intel_output,
-+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 2);
-
-- DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8);
-+ DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
-
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return connector_status_unknown;
-@@ -1540,50 +1601,32 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
- static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
- {
- struct intel_output *intel_output = to_intel_output(connector);
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ int num_modes;
-
- /* set the bus switch and get the modes */
-- intel_ddc_get_modes(intel_output);
-+ num_modes = intel_ddc_get_modes(intel_output);
-
--#if 0
-- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-- /* Mac mini hack. On this device, I get DDC through the analog, which
-- * load-detects as disconnected. I fail to DDC through the SDVO DDC,
-- * but it does load-detect as connected. So, just steal the DDC bits
-- * from analog when we fail at finding it the right way.
-+ /*
-+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
-+ * link between analog and digital outputs. So, if the regular SDVO
-+ * DDC fails, check to see if the analog output is disconnected, in
-+ * which case we'll look there for the digital DDC data.
- */
-- crt = xf86_config->output[0];
-- intel_output = crt->driver_private;
-- if (intel_output->type == I830_OUTPUT_ANALOG &&
-- crt->funcs->detect(crt) == XF86OutputStatusDisconnected) {
-- I830I2CInit(pScrn, &intel_output->pDDCBus, GPIOA, "CRTDDC_A");
-- edid_mon = xf86OutputGetEDID(crt, intel_output->pDDCBus);
-- xf86DestroyI2CBusRec(intel_output->pDDCBus, true, true);
-- }
-- if (edid_mon) {
-- xf86OutputSetEDID(output, edid_mon);
-- modes = xf86OutputGetEDIDModes(output);
-- }
--#endif
--}
-+ if (num_modes == 0 &&
-+ sdvo_priv->analog_ddc_bus &&
-+ !intel_analog_is_connected(intel_output->base.dev)) {
-+ struct i2c_adapter *digital_ddc_bus;
-
--/**
-- * This function checks the current TV format, and chooses a default if
-- * it hasn't been set.
-- */
--static void
--intel_sdvo_check_tv_format(struct intel_output *output)
--{
-- struct intel_sdvo_priv *dev_priv = output->dev_priv;
-- struct intel_sdvo_tv_format format;
-- uint8_t status;
-+ /* Switch to the analog ddc bus and try that
-+ */
-+ digital_ddc_bus = intel_output->ddc_bus;
-+ intel_output->ddc_bus = sdvo_priv->analog_ddc_bus;
-
-- intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0);
-- status = intel_sdvo_read_response(output, &format, sizeof(format));
-- if (status != SDVO_CMD_STATUS_SUCCESS)
-- return;
-+ (void) intel_ddc_get_modes(intel_output);
-
-- memcpy(&dev_priv->tv_format, &format, sizeof(format));
-+ intel_output->ddc_bus = digital_ddc_bus;
-+ }
- }
-
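The comment above describes the Mac mini quirk: the DVI-I port shares one DDC link, so when the SDVO DDC returns no EDID and the analog output load-detects as disconnected, the driver borrows the analog bus. A small userspace sketch of that fallback order follows; ddc_bus, probe_modes and analog_is_connected are invented stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

struct ddc_bus { const char *name; int modes_available; };

static int probe_modes(struct ddc_bus *bus)
{
    printf("probing EDID over %s\n", bus->name);
    return bus->modes_available;
}

static bool analog_is_connected(void) { return false; }  /* assume VGA idle */

int main(void)
{
    struct ddc_bus digital = { "SDVO DDC",    0 };  /* EDID read failed */
    struct ddc_bus analog  = { "CRT/VGA DDC", 3 };  /* EDID actually lives here */

    int num_modes = probe_modes(&digital);

    /* Mac-mini-style fallback: only borrow the analog DDC when the digital
     * probe found nothing and no VGA monitor is using that bus. */
    if (num_modes == 0 && !analog_is_connected())
        num_modes = probe_modes(&analog);

    printf("found %d modes\n", num_modes);
    return 0;
}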
- /*
-@@ -1656,17 +1699,26 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
- struct intel_output *output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- struct intel_sdvo_sdtv_resolution_request tv_res;
-- uint32_t reply = 0;
-+ uint32_t reply = 0, format_map = 0;
-+ int i;
- uint8_t status;
-- int i = 0;
-
-- intel_sdvo_check_tv_format(output);
-
- /* Read the list of supported input resolutions for the selected TV
- * format.
- */
-- memset(&tv_res, 0, sizeof(tv_res));
-- memcpy(&tv_res, &sdvo_priv->tv_format, sizeof(tv_res));
-+ for (i = 0; i < TV_FORMAT_NUM; i++)
-+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
-+ break;
-+
-+ format_map = (1 << i);
-+ memcpy(&tv_res, &format_map,
-+ sizeof(struct intel_sdvo_sdtv_resolution_request) >
-+ sizeof(format_map) ? sizeof(format_map) :
-+ sizeof(struct intel_sdvo_sdtv_resolution_request));
-+
-+ intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
-+
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
- &tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(output, &reply, 3);
-@@ -1681,6 +1733,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
- if (nmode)
- drm_mode_probed_add(connector, nmode);
- }
-+
- }
-
- static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
-@@ -1748,17 +1801,62 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
- intel_i2c_destroy(intel_output->i2c_bus);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
-+ if (sdvo_priv->analog_ddc_bus)
-+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(connector->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
-
-+ if (sdvo_priv->tv_format_property)
-+ drm_property_destroy(connector->dev,
-+ sdvo_priv->tv_format_property);
-+
- drm_sysfs_connector_remove(connector);
- drm_connector_cleanup(connector);
-
- kfree(intel_output);
- }
-
-+static int
-+intel_sdvo_set_property(struct drm_connector *connector,
-+ struct drm_property *property,
-+ uint64_t val)
-+{
-+ struct intel_output *intel_output = to_intel_output(connector);
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ struct drm_encoder *encoder = &intel_output->enc;
-+ struct drm_crtc *crtc = encoder->crtc;
-+ int ret = 0;
-+ bool changed = false;
-+
-+ ret = drm_connector_property_set_value(connector, property, val);
-+ if (ret < 0)
-+ goto out;
-+
-+ if (property == sdvo_priv->tv_format_property) {
-+ if (val >= TV_FORMAT_NUM) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ if (sdvo_priv->tv_format_name ==
-+ sdvo_priv->tv_format_supported[val])
-+ goto out;
-+
-+ sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
-+ changed = true;
-+ } else {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (changed && crtc)
-+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
-+ crtc->y, crtc->fb);
-+out:
-+ return ret;
-+}
-+
- static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
- .dpms = intel_sdvo_dpms,
- .mode_fixup = intel_sdvo_mode_fixup,
-@@ -1773,6 +1871,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .restore = intel_sdvo_restore,
- .detect = intel_sdvo_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
-+ .set_property = intel_sdvo_set_property,
- .destroy = intel_sdvo_destroy,
- };
-
-@@ -2013,10 +2112,9 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
-
- sdvo_priv->controlled_output = 0;
- memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
-- DRM_DEBUG_KMS(I915_SDVO,
-- "%s: Unknown SDVO output type (0x%02x%02x)\n",
-- SDVO_NAME(sdvo_priv),
-- bytes[0], bytes[1]);
-+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
-+ SDVO_NAME(sdvo_priv),
-+ bytes[0], bytes[1]);
- ret = false;
- }
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
-@@ -2029,6 +2127,55 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
-
- }
-
-+static void intel_sdvo_tv_create_property(struct drm_connector *connector)
-+{
-+ struct intel_output *intel_output = to_intel_output(connector);
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ struct intel_sdvo_tv_format format;
-+ uint32_t format_map, i;
-+ uint8_t status;
-+
-+ intel_sdvo_set_target_output(intel_output,
-+ sdvo_priv->controlled_output);
-+
-+ intel_sdvo_write_cmd(intel_output,
-+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
-+ status = intel_sdvo_read_response(intel_output,
-+ &format, sizeof(format));
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return;
-+
-+ memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
-+ sizeof(format_map) : sizeof(format));
-+
-+ if (format_map == 0)
-+ return;
-+
-+ sdvo_priv->format_supported_num = 0;
-+ for (i = 0 ; i < TV_FORMAT_NUM; i++)
-+ if (format_map & (1 << i)) {
-+ sdvo_priv->tv_format_supported
-+ [sdvo_priv->format_supported_num++] =
-+ tv_format_names[i];
-+ }
-+
-+
-+ sdvo_priv->tv_format_property =
-+ drm_property_create(
-+ connector->dev, DRM_MODE_PROP_ENUM,
-+ "mode", sdvo_priv->format_supported_num);
-+
-+ for (i = 0; i < sdvo_priv->format_supported_num; i++)
-+ drm_property_add_enum(
-+ sdvo_priv->tv_format_property, i,
-+ i, sdvo_priv->tv_format_supported[i]);
-+
-+ sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
-+ drm_connector_attach_property(
-+ connector, sdvo_priv->tv_format_property, 0);
-+
-+}
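intel_sdvo_tv_create_property above reads the supported-format bitmask from the device and turns the set bits into the enum values of a "mode" property. A standalone sketch of that decode step, using a shortened, illustrative name table:

#include <stdint.h>
#include <stdio.h>

static const char *const tv_format_names[] = {
    "NTSC_M", "NTSC_J", "PAL_B", "PAL_D", "SECAM_B",
};
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))

int main(void)
{
    uint32_t format_map = (1u << 0) | (1u << 2) | (1u << 3); /* device caps */
    const char *supported[TV_FORMAT_NUM];
    unsigned int num = 0, i;

    /* Collect only the names whose bit is set in the capability mask. */
    for (i = 0; i < TV_FORMAT_NUM; i++)
        if (format_map & (1u << i))
            supported[num++] = tv_format_names[i];

    for (i = 0; i < num; i++)
        printf("enum %u -> %s\n", i, supported[i]);

    /* The first supported format becomes the property's initial value. */
    printf("default: %s\n", supported[0]);
    return 0;
}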
-+
- bool intel_sdvo_init(struct drm_device *dev, int output_device)
- {
- struct drm_connector *connector;
-@@ -2066,18 +2213,22 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
- /* Read the regs to test if we can talk to the device */
- for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
-- DRM_DEBUG_KMS(I915_SDVO,
-- "No SDVO device found on SDVO%c\n",
-+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
- goto err_i2c;
- }
- }
-
- /* setup the DDC bus. */
-- if (output_device == SDVOB)
-+ if (output_device == SDVOB) {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
-- else
-+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
-+ "SDVOB/VGA DDC BUS");
-+ } else {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
-+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
-+ "SDVOC/VGA DDC BUS");
-+ }
-
- if (intel_output->ddc_bus == NULL)
- goto err_i2c;
-@@ -2090,7 +2241,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
-
- if (intel_sdvo_output_setup(intel_output,
- sdvo_priv->caps.output_flags) != true) {
-- DRM_DEBUG("SDVO output failed to setup on SDVO%c\n",
-+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
- goto err_i2c;
- }
-@@ -2111,6 +2262,8 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
- drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
-+ if (sdvo_priv->is_tv)
-+ intel_sdvo_tv_create_property(connector);
- drm_sysfs_connector_add(connector);
-
- intel_sdvo_select_ddc_bus(sdvo_priv);
-@@ -2123,7 +2276,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
- &sdvo_priv->pixel_clock_max);
-
-
-- DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
-+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
- "clock range %dMHz - %dMHz, "
- "input 1: %c, input 2: %c, "
- "output 1: %c, output 2: %c\n",
-@@ -2143,6 +2296,8 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
- return true;
-
- err_i2c:
-+ if (sdvo_priv->analog_ddc_bus != NULL)
-+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
- if (intel_output->ddc_bus != NULL)
- intel_i2c_destroy(intel_output->ddc_bus);
- if (intel_output->i2c_bus != NULL)
-diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
-index 2fbe13a..a6c686c 100644
---- a/drivers/gpu/drm/i915/intel_tv.c
-+++ b/drivers/gpu/drm/i915/intel_tv.c
-@@ -1437,6 +1437,35 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
- return type;
- }
-
-+/*
-+ * Here we set an accurate TV format according to the connector type,
-+ * i.e. a Component TV connector should not be assigned NTSC or PAL.
-+ */
-+static void intel_tv_find_better_format(struct drm_connector *connector)
-+{
-+ struct intel_output *intel_output = to_intel_output(connector);
-+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
-+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
-+ int i;
-+
-+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
-+ tv_mode->component_only)
-+ return;
-+
-+
-+ for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
-+ tv_mode = tv_modes + i;
-+
-+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
-+ tv_mode->component_only)
-+ break;
-+ }
-+
-+ tv_priv->tv_format = tv_mode->name;
-+ drm_connector_property_set_value(connector,
-+ connector->dev->mode_config.tv_mode_property, i);
-+}
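intel_tv_find_better_format above keeps the user's format when it already suits the connector and otherwise falls back to the first table entry whose component_only flag matches. The same selection logic in self-contained form, with an invented four-entry mode table:

#include <stdbool.h>
#include <stdio.h>

struct tv_mode { const char *name; bool component_only; };

static const struct tv_mode tv_modes[] = {
    { "NTSC-M",       false },
    { "PAL-B",        false },
    { "480p@59.94Hz", true  },
    { "720p@60Hz",    true  },
};

int main(void)
{
    bool connector_is_component = true;
    const struct tv_mode *mode = &tv_modes[0];   /* current selection */
    size_t i;

    /* Already consistent? Then leave the user's choice alone. */
    if (connector_is_component == mode->component_only) {
        printf("keeping %s\n", mode->name);
        return 0;
    }

    /* Otherwise fall back to the first format that suits the connector. */
    for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
        if (connector_is_component == tv_modes[i].component_only) {
            mode = &tv_modes[i];
            break;
        }
    }
    printf("switching to %s (index %zu)\n", mode->name, i);
    return 0;
}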
-+
- /**
- * Detect the TV connection.
- *
-@@ -1473,6 +1502,7 @@ intel_tv_detect(struct drm_connector *connector)
- if (type < 0)
- return connector_status_disconnected;
-
-+ intel_tv_find_better_format(connector);
- return connector_status_connected;
- }
-
-diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
-index 6c67a02..3c917fb 100644
---- a/drivers/gpu/drm/mga/mga_dma.c
-+++ b/drivers/gpu/drm/mga/mga_dma.c
-@@ -444,7 +444,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
- {
- drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-- unsigned int warp_size = mga_warp_microcode_size(dev_priv);
-+ unsigned int warp_size = MGA_WARP_UCODE_SIZE;
- int err;
- unsigned offset;
- const unsigned secondary_size = dma_bs->secondary_bin_count
-@@ -619,7 +619,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
- {
- drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-- unsigned int warp_size = mga_warp_microcode_size(dev_priv);
-+ unsigned int warp_size = MGA_WARP_UCODE_SIZE;
- unsigned int primary_size;
- unsigned int bin_count;
- int err;
-diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
-index 3d264f2..be6c6b9 100644
---- a/drivers/gpu/drm/mga/mga_drv.h
-+++ b/drivers/gpu/drm/mga/mga_drv.h
-@@ -177,7 +177,6 @@ extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
- extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
-
- /* mga_warp.c */
--extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
- extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
- extern int mga_warp_init(drm_mga_private_t * dev_priv);
-
-diff --git a/drivers/gpu/drm/mga/mga_ucode.h b/drivers/gpu/drm/mga/mga_ucode.h
-deleted file mode 100644
-index b611e27..0000000
---- a/drivers/gpu/drm/mga/mga_ucode.h
-+++ /dev/null
-@@ -1,41 +0,0 @@
--/* mga_ucode.h -- Matrox G200/G400 WARP engine microcode -*- linux-c -*-
-- * Created: Thu Jan 11 21:20:43 2001 by gareth@valinux.com
-- *
-- * Copyright 1999 Matrox Graphics Inc.
-- * All Rights Reserved.
-- *
-- * Permission is hereby granted, free of charge, to any person obtaining a
-- * copy of this software and associated documentation files (the "Software"),
-- * to deal in the Software without restriction, including without limitation
-- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- * and/or sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following conditions:
-- *
-- * The above copyright notice and this permission notice shall be included
-- * in all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-- * MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
-- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
-- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-- *
-- * Kernel-based WARP engine management:
-- * Gareth Hughes <gareth@valinux.com>
-- */
--
--/*
-- * WARP pipes are named according to the functions they perform, where:
-- *
-- * - T stands for computation of texture stage 0
-- * - T2 stands for computation of both texture stage 0 and texture stage 1
-- * - G stands for computation of triangle intensity (Gouraud interpolation)
-- * - Z stands for computation of Z buffer interpolation
-- * - S stands for computation of specular highlight
-- * - A stands for computation of the alpha channel
-- * - F stands for computation of vertex fog interpolation
-- */
--
--/*(DEBLOBBED)*/
-diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
-index 651b93c..9aad484 100644
---- a/drivers/gpu/drm/mga/mga_warp.c
-+++ b/drivers/gpu/drm/mga/mga_warp.c
-@@ -27,43 +32,92 @@
- * Gareth Hughes <gareth@valinux.com>
- */
-
-+#include <linux/firmware.h>
-+#include <linux/ihex.h>
-+#include <linux/platform_device.h>
-+
- #include "drmP.h"
- #include "drm.h"
- #include "mga_drm.h"
- #include "mga_drv.h"
--#include "mga_ucode.h"
-
-+#define FIRMWARE_G200 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_G400 "/*(DEBLOBBED)*/"
-+
-+/*(DEBLOBBED)*/
-+
- #define MGA_WARP_CODE_ALIGN 256 /* in bytes */
--
--/*(DEBLOBBED)*/
-+#define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN)
-
--unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
-+int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
- {
-+ unsigned char *vcbase = dev_priv->warp->handle;
-+ unsigned long pcbase = dev_priv->warp->offset;
-+ const char *firmware_name;
-+ struct platform_device *pdev;
-+ const struct firmware *fw = NULL;
-+ const struct ihex_binrec *rec;
-+ unsigned int size;
-+ int n_pipes, where;
-+ int rc = 0;
-+
- switch (dev_priv->chipset) {
-- /*(DEBLOBBED)*/
-+ case MGA_CARD_TYPE_G400:
-+ case MGA_CARD_TYPE_G550:
-+ firmware_name = FIRMWARE_G400;
-+ n_pipes = MGA_MAX_G400_PIPES;
-+ break;
-+ case MGA_CARD_TYPE_G200:
-+ firmware_name = FIRMWARE_G200;
-+ n_pipes = MGA_MAX_G200_PIPES;
-+ break;
- default:
-- return 0;
-+ return -EINVAL;
- }
--}
--
--/*(DEBLOBBED)*/
--
--int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
--{
-- const unsigned int size = mga_warp_microcode_size(dev_priv);
-+ size = 0;
-+ where = 0;
-+ for (rec = (const struct ihex_binrec *)fw->data;
-+ rec;
-+ rec = ihex_next_binrec(rec)) {
-+ size += WARP_UCODE_SIZE(be16_to_cpu(rec->len));
-+ where++;
-+ }
-
-+ if (where != n_pipes) {
-+ DRM_ERROR("mga: Invalid microcode \"%s\"\n", firmware_name);
-+ rc = -EINVAL;
-+ goto out;
-+ }
-+ size = PAGE_ALIGN(size);
- DRM_DEBUG("MGA ucode size = %d bytes\n", size);
- if (size > dev_priv->warp->size) {
- DRM_ERROR("microcode too large! (%u > %lu)\n",
- size, dev_priv->warp->size);
-- return -ENOMEM;
-+ rc = -ENOMEM;
-+ goto out;
- }
-
-- switch (dev_priv->chipset) {
-- /*(DEBLOBBED)*/
-- default:
-- return -EINVAL;
-+ memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
-+
-+ where = 0;
-+ for (rec = (const struct ihex_binrec *)fw->data;
-+ rec;
-+ rec = ihex_next_binrec(rec)) {
-+ unsigned int src_size, dst_size;
-+
-+ DRM_DEBUG(" pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase);
-+ dev_priv->warp_pipe_phys[where] = pcbase;
-+ src_size = be16_to_cpu(rec->len);
-+ dst_size = WARP_UCODE_SIZE(src_size);
-+ memcpy(vcbase, rec->data, src_size);
-+ pcbase += dst_size;
-+ vcbase += dst_size;
-+ where++;
- }
-+
-+out:
-+ release_firmware(fw);
-+ return rc;
- }
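The rewritten mga_warp_install_microcode walks the ihex records twice: once to total up the CODE_ALIGN-rounded sizes (rejecting images with the wrong pipe count or an oversized payload), then again to record each pipe's offset and copy its bytes. A userspace sketch of that two-pass, aligned packing, with a toy record type standing in for struct ihex_binrec:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec { uint16_t len; const uint8_t *data; };   /* toy ihex record */

#define CODE_ALIGN 256
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
    static const uint8_t pipe0[3] = { 0xde, 0xad, 0xbe };
    static const uint8_t pipe1[5] = { 1, 2, 3, 4, 5 };
    const struct rec recs[] = { { 3, pipe0 }, { 5, pipe1 } };
    uint8_t warp_mem[2 * CODE_ALIGN];          /* stand-in for dev_priv->warp */
    size_t offsets[2];
    size_t total = 0, i;

    /* First pass: each record occupies an ALIGN-rounded slot, so pipes start
     * on CODE_ALIGN boundaries. */
    for (i = 0; i < 2; i++)
        total += ALIGN_UP(recs[i].len, CODE_ALIGN);
    if (total > sizeof(warp_mem))
        return 1;                              /* microcode too large */

    /* Second pass: note where each pipe lands, then copy the payload. */
    total = 0;
    for (i = 0; i < 2; i++) {
        offsets[i] = total;
        memcpy(warp_mem + total, recs[i].data, recs[i].len);
        total += ALIGN_UP(recs[i].len, CODE_ALIGN);
    }

    for (i = 0; i < 2; i++)
        printf("pipe %zu at offset %zu\n", i, offsets[i]);
    return 0;
}

The per-pipe offsets are what the driver stashes in warp_pipe_phys; the size check before any copy is what turns a truncated firmware image into a clean -ENOMEM/-EINVAL instead of an overrun.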
-
- #define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
-diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
-index c75fd35..4c39a40 100644
---- a/drivers/gpu/drm/r128/r128_cce.c
-+++ b/drivers/gpu/drm/r128/r128_cce.c
-@@ -29,6 +29,9 @@
- * Gareth Hughes <gareth@valinux.com>
- */
-
-+#include <linux/firmware.h>
-+#include <linux/platform_device.h>
-+
- #include "drmP.h"
- #include "drm.h"
- #include "r128_drm.h"
-@@ -36,8 +39,9 @@
-
- #define R128_FIFO_DEBUG 0
-
--/* CCE microcode (from ATI) */
--/*(DEBLOBBED)*/
-+#define FIRMWARE_NAME "/*(DEBLOBBED)*/"
-+
-+/*(DEBLOBBED)*/
-
- static int R128_READ_PLL(struct drm_device * dev, int addr)
- {
-@@ -176,14 +138,51 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
- */
-
- /* Load the microcode for the CCE */
--#define r128_cce_load_microcode(dev_priv) \
-- do { \
-- DRM_ERROR("Missing Free microcode!\n"); \
-- dev->dev_private = (void *)dev_priv; \
-- r128_do_cleanup_cce(dev); \
-- return -EINVAL; \
-- } while (0)
--/*(DEBLOBBED)*/
-+static int r128_cce_load_microcode(drm_r128_private_t *dev_priv)
-+{
-+ struct platform_device *pdev;
-+ const struct firmware *fw;
-+ const __be32 *fw_data;
-+ int rc, i;
-+
-+ DRM_DEBUG("\n");
-+
-+ pdev = platform_device_register_simple("r128_cce", 0, NULL, 0);
-+ if (IS_ERR(pdev)) {
-+ printk(KERN_ERR "r128_cce: Failed to register firmware\n");
-+ return PTR_ERR(pdev);
-+ }
-+ rc = reject_firmware(&fw, FIRMWARE_NAME, &pdev->dev);
-+ platform_device_unregister(pdev);
-+ if (rc) {
-+ printk(KERN_ERR "r128_cce: Failed to load firmware \"%s\"\n",
-+ FIRMWARE_NAME);
-+ return rc;
-+ }
-+
-+ if (fw->size != 256 * 8) {
-+ printk(KERN_ERR
-+ "r128_cce: Bogus length %zu in firmware \"%s\"\n",
-+ fw->size, FIRMWARE_NAME);
-+ rc = -EINVAL;
-+ goto out_release;
-+ }
-+
-+ r128_do_wait_for_idle(dev_priv);
-+
-+ fw_data = (const __be32 *)fw->data;
-+ R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
-+ for (i = 0; i < 256; i++) {
-+ R128_WRITE(R128_PM4_MICROCODE_DATAH,
-+ be32_to_cpup(&fw_data[i * 2]));
-+ R128_WRITE(R128_PM4_MICROCODE_DATAL,
-+ be32_to_cpup(&fw_data[i * 2 + 1]));
-+ }
-+
-+out_release:
-+ release_firmware(fw);
-+ return rc;
-+}
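r128_cce_load_microcode above insists on a 256-entry image of big-endian word pairs and streams them to the DATAH/DATAL registers. A standalone sketch of the validation and byte-order handling, with ntohl() standing in for be32_to_cpup() and plain arrays standing in for the registers:

#include <arpa/inet.h>   /* ntohl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CCE_ENTRIES 256

static uint32_t datah[CCE_ENTRIES], datal[CCE_ENTRIES];   /* fake registers */

static int load_microcode(const uint8_t *blob, size_t size)
{
    size_t i;

    /* The image is exactly 256 pairs of big-endian 32-bit words (2 KiB);
     * anything else is rejected before the hardware is touched. */
    if (size != CCE_ENTRIES * 8)
        return -1;

    for (i = 0; i < CCE_ENTRIES; i++) {
        uint32_t hi, lo;
        memcpy(&hi, blob + i * 8, 4);
        memcpy(&lo, blob + i * 8 + 4, 4);
        datah[i] = ntohl(hi);          /* high word goes to MICROCODE_DATAH */
        datal[i] = ntohl(lo);          /* low word goes to MICROCODE_DATAL  */
    }
    return 0;
}

int main(void)
{
    static uint8_t blob[CCE_ENTRIES * 8];              /* right length, zeroes */
    printf("good image: %d\n", load_microcode(blob, sizeof(blob)));
    printf("bad length: %d\n", load_microcode(blob, 100));
    return 0;
}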
-
- /* Flush any pending commands to the CCE. This should only be used just
- * prior to a wait for idle, as it informs the engine that the command
-@@ -350,9 +342,15 @@ static void r128_cce_init_ring_buffer(struct drm_device * dev,
- static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
- {
- drm_r128_private_t *dev_priv;
-+ int rc;
-
- DRM_DEBUG("\n");
-
-+ if (dev->dev_private) {
-+ DRM_DEBUG("called when already initialized\n");
-+ return -EINVAL;
-+ }
-+
- dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-@@ -575,13 +573,18 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
- #endif
-
- r128_cce_init_ring_buffer(dev, dev_priv);
-- r128_cce_load_microcode(dev_priv);
-+ rc = r128_cce_load_microcode(dev_priv);
-
- dev->dev_private = (void *)dev_priv;
-
- r128_do_engine_reset(dev);
-
-- return 0;
-+ if (rc) {
-+ DRM_ERROR("Failed to load firmware!\n");
-+ r128_do_cleanup_cce(dev);
-+ }
-+
-+ return rc;
- }
-
- int r128_do_cleanup_cce(struct drm_device * dev)
-@@ -649,6 +652,8 @@ int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_pri
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
- DRM_DEBUG("while CCE running\n");
- return 0;
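The intel_i2c hunk above only renames the register (CG_2D_DIS becomes DSPCLK_GATE_D); the underlying pattern is a read-modify-write that sets one clock-gating bit around the quirk and clears it afterwards. A compilable sketch against a fake register, with an assumed bit position rather than the real one:

#include <stdio.h>

/* Fake gating register; the driver reaches the real one through
 * I915_READ/I915_WRITE on DSPCLK_GATE_D. */
static unsigned int fake_dspclk_gate_d;

#define DPCUNIT_CLOCK_GATE_DISABLE (1u << 27)   /* illustrative position */

static unsigned int reg_read(void)    { return fake_dspclk_gate_d; }
static void reg_write(unsigned int v) { fake_dspclk_gate_d = v; }

/* Read-modify-write: disable the clock gate while the quirk is needed,
 * restore gating afterwards. */
static void i2c_quirk_set(int enable)
{
    if (enable)
        reg_write(reg_read() | DPCUNIT_CLOCK_GATE_DISABLE);
    else
        reg_write(reg_read() & ~DPCUNIT_CLOCK_GATE_DISABLE);
}

int main(void)
{
    i2c_quirk_set(1);
    printf("after enable:  %#x\n", fake_dspclk_gate_d);
    i2c_quirk_set(0);
    printf("after disable: %#x\n", fake_dspclk_gate_d);
    return 0;
}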
-@@ -671,6 +676,8 @@ int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- /* Flush any pending CCE commands. This ensures any outstanding
- 	 * commands are executed by the engine before we turn it off.
- */
-@@ -708,10 +715,7 @@ int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_pri
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-- if (!dev_priv) {
-- DRM_DEBUG("called before init done\n");
-- return -EINVAL;
-- }
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- r128_do_cce_reset(dev_priv);
-
-@@ -728,6 +732,8 @@ int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- if (dev_priv->cce_running) {
- r128_do_cce_flush(dev_priv);
- }
-@@ -741,6 +747,8 @@ int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev->dev_private);
-+
- return r128_do_engine_reset(dev);
- }
-
-diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
-index 797a26c..3c60829 100644
---- a/drivers/gpu/drm/r128/r128_drv.h
-+++ b/drivers/gpu/drm/r128/r128_drv.h
-@@ -422,6 +422,14 @@ static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
- * Misc helper macros
- */
-
-+#define DEV_INIT_TEST_WITH_RETURN(_dev_priv) \
-+do { \
-+ if (!_dev_priv) { \
-+ DRM_ERROR("called with no initialization\n"); \
-+ return -EINVAL; \
-+ } \
-+} while (0)
-+
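DEV_INIT_TEST_WITH_RETURN centralises the "called with no initialization" check that the r128 ioctls previously open-coded. A compilable sketch of the same guard-macro pattern and how an ioctl-style function would use it (dev_priv and do_ioctl are invented for the example):

#include <errno.h>
#include <stdio.h>

/* Bail out of the calling function with -EINVAL when private state is missing. */
#define DEV_INIT_TEST_WITH_RETURN(_dev_priv)                        \
do {                                                                \
    if (!(_dev_priv)) {                                             \
        fprintf(stderr, "called with no initialization\n");         \
        return -EINVAL;                                             \
    }                                                               \
} while (0)

struct dev_priv { int ready; };

static int do_ioctl(struct dev_priv *dev_priv)
{
    DEV_INIT_TEST_WITH_RETURN(dev_priv);   /* every ioctl checks this first */
    return 0;
}

int main(void)
{
    struct dev_priv priv = { 1 };
    printf("initialized:   %d\n", do_ioctl(&priv));
    printf("uninitialized: %d\n", do_ioctl(NULL));
    return 0;
}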
- #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
- do { \
- drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
-diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
-index 026a48c..af2665c 100644
---- a/drivers/gpu/drm/r128/r128_state.c
-+++ b/drivers/gpu/drm/r128/r128_state.c
-@@ -1244,14 +1244,18 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
- static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
- drm_r128_private_t *dev_priv = dev->dev_private;
-- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
-+ drm_r128_sarea_t *sarea_priv;
- drm_r128_clear_t *clear = data;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
-+ sarea_priv = dev_priv->sarea_priv;
-+
- if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
-
-@@ -1312,6 +1316,8 @@ static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *fi
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- if (!dev_priv->page_flipping)
-@@ -1331,6 +1337,8 @@ static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *fi
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
-@@ -1354,10 +1362,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-- if (!dev_priv) {
-- DRM_ERROR("called with no initialization\n");
-- return -EINVAL;
-- }
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
- DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
-@@ -1410,10 +1415,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-- if (!dev_priv) {
-- DRM_ERROR("called with no initialization\n");
-- return -EINVAL;
-- }
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
- elts->idx, elts->start, elts->end, elts->discard);
-@@ -1476,6 +1478,8 @@ static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *fi
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
-
- if (blit->idx < 0 || blit->idx >= dma->buf_count) {
-@@ -1501,6 +1505,8 @@ static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *f
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- ret = -EINVAL;
-@@ -1531,6 +1537,8 @@ static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-+
- if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
- return -EFAULT;
-
-@@ -1555,10 +1563,7 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-- if (!dev_priv) {
-- DRM_ERROR("called with no initialization\n");
-- return -EINVAL;
-- }
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
- indirect->idx, indirect->start, indirect->end,
-@@ -1620,10 +1625,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
- drm_r128_getparam_t *param = data;
- int value;
-
-- if (!dev_priv) {
-- DRM_ERROR("called with no initialization\n");
-- return -EINVAL;
-- }
-+ DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
-
-diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
-index 2168d67..5982321 100644
---- a/drivers/gpu/drm/radeon/Kconfig
-+++ b/drivers/gpu/drm/radeon/Kconfig
-@@ -1,7 +1,6 @@
- config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default"
- depends on DRM_RADEON
-- select DRM_TTM
- help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
-diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
-index 013d380..09a2892 100644
---- a/drivers/gpu/drm/radeon/Makefile
-+++ b/drivers/gpu/drm/radeon/Makefile
-@@ -3,18 +3,53 @@
- # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
- ccflags-y := -Iinclude/drm
-+
-+hostprogs-y := mkregtable
-+
-+quiet_cmd_mkregtable = MKREGTABLE $@
-+ cmd_mkregtable = $(obj)/mkregtable $< > $@
-+
-+$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
-+ $(call if_changed,mkregtable)
-+
-+$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
-+ $(call if_changed,mkregtable)
-+
-+$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
-+ $(call if_changed,mkregtable)
-+
-+$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
-+ $(call if_changed,mkregtable)
-+
-+$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
-+ $(call if_changed,mkregtable)
-+
-+$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
-+ $(call if_changed,mkregtable)
-+
-+$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
-+
-+$(obj)/r200.o: $(obj)/r200_reg_safe.h
-+
-+$(obj)/rv515.o: $(obj)/rv515_reg_safe.h
-+
-+$(obj)/r300.o: $(obj)/r300_reg_safe.h
-+
-+$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
-+
- radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
- radeon_irq.o r300_cmdbuf.o r600_cp.o
--
--radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
-+# add KMS driver
-+radeon-y += radeon_device.o radeon_kms.o \
- radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
- atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
- radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
- radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
- radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
- radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
-- rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \
-- radeon_test.o
-+ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
-+ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-+ r600_blit_kms.o
-
- radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
-
-diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
-index cf67928..5d40208 100644
---- a/drivers/gpu/drm/radeon/atombios.h
-+++ b/drivers/gpu/drm/radeon/atombios.h
-@@ -2374,6 +2374,17 @@ typedef struct _ATOM_ANALOG_TV_INFO {
- ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
- } ATOM_ANALOG_TV_INFO;
-
-+#define MAX_SUPPORTED_TV_TIMING_V1_2 3
-+
-+typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
-+ ATOM_COMMON_TABLE_HEADER sHeader;
-+ UCHAR ucTV_SupportedStandard;
-+ UCHAR ucTV_BootUpDefaultStandard;
-+ UCHAR ucExt_TV_ASIC_ID;
-+ UCHAR ucExt_TV_ASIC_SlaveAddr;
-+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
-+} ATOM_ANALOG_TV_INFO_V1_2;
-+
- /**************************************************************************/
- /* VRAM usage and their definitions */
-
-diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
-index 74d034f..a7edd0f 100644
---- a/drivers/gpu/drm/radeon/atombios_crtc.c
-+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
-@@ -31,6 +31,10 @@
- #include "atom.h"
- #include "atom-bits.h"
-
-+/* evil but including atombios.h is much worse */
-+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
-+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing,
-+ int32_t *pixel_clock);
- static void atombios_overscan_setup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-@@ -89,17 +93,32 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- ENABLE_SCALER_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
-+
- /* fixme - fill in enc_priv for atom dac */
- enum radeon_tv_std tv_std = TV_STD_NTSC;
-+ bool is_tv = false, is_cv = false;
-+ struct drm_encoder *encoder;
-
- if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
- return;
-
-+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+ /* find tv std */
-+ if (encoder->crtc == crtc) {
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
-+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
-+ tv_std = tv_dac->tv_std;
-+ is_tv = true;
-+ }
-+ }
-+ }
-+
- memset(&args, 0, sizeof(args));
-
- args.ucScaler = radeon_crtc->crtc_id;
-
-- if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) {
-+ if (is_tv) {
- switch (tv_std) {
- case TV_STD_NTSC:
- default:
-@@ -128,7 +147,7 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
- break;
- }
- args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
-- } else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) {
-+ } else if (is_cv) {
- args.ucTVStandard = ATOM_TV_CV;
- args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
- } else {
-@@ -151,9 +170,9 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
- }
- }
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-- if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
-- && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
-- atom_rv515_force_tv_scaler(rdev);
-+ if ((is_tv || is_cv)
-+ && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
-+ atom_rv515_force_tv_scaler(rdev, radeon_crtc);
- }
- }
-
-@@ -370,6 +389,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
- pll_flags |= RADEON_PLL_USE_REF_DIV;
- }
- radeon_encoder = to_radeon_encoder(encoder);
-+ break;
- }
- }
-
-@@ -551,42 +571,68 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
- struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
-+ int need_tv_timings = 0;
-+ bool ret;
-
- /* TODO color tiling */
- memset(&crtc_timing, 0, sizeof(crtc_timing));
-
-- /* TODO tv */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
--
-+ /* find tv std */
-+ if (encoder->crtc == crtc) {
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+
-+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
-+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
-+ if (tv_dac) {
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M)
-+ need_tv_timings = 1;
-+ else
-+ need_tv_timings = 2;
-+ break;
-+ }
-+ }
-+ }
- }
-
- crtc_timing.ucCRTC = radeon_crtc->crtc_id;
-- crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
-- crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
-- crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
-- crtc_timing.usH_SyncWidth =
-- adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
-+ if (need_tv_timings) {
-+ ret = radeon_atom_get_tv_timings(rdev, need_tv_timings - 1,
-+ &crtc_timing, &adjusted_mode->clock);
-+ if (ret == false)
-+ need_tv_timings = 0;
-+ }
-
-- crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
-- crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
-- crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
-- crtc_timing.usV_SyncWidth =
-- adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
-+ if (!need_tv_timings) {
-+ crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
-+ crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
-+ crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
-+ crtc_timing.usH_SyncWidth =
-+ adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
-
-- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-- crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
-+ crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
-+ crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
-+ crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
-+ crtc_timing.usV_SyncWidth =
-+ adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
-
-- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-- crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
-+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
-
-- if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
-- crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
-+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
-
-- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-- crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
-+ if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
-+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
-
-- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-- crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
-+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
-+
-+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
-+ crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
-+ }
-
- atombios_crtc_set_pll(crtc, adjusted_mode);
- atombios_crtc_set_timing(crtc, &crtc_timing);
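The reworked atombios_crtc_mode_set decides up front whether canned TV timings are needed: nothing for non-TV encoders, table entry 0 for NTSC-like standards, entry 1 for the rest (need_tv_timings minus one indexes radeon_atom_get_tv_timings). A reduced, compilable sketch of that decision, with an illustrative subset of the tv_std values:

#include <stdio.h>

enum tv_std { TV_STD_NTSC, TV_STD_NTSC_J, TV_STD_PAL, TV_STD_PAL_M, TV_STD_PAL_60 };

static int need_tv_timings(int encoder_is_tv, enum tv_std std)
{
    if (!encoder_is_tv)
        return 0;          /* program the adjusted mode directly */
    if (std == TV_STD_NTSC || std == TV_STD_NTSC_J || std == TV_STD_PAL_M)
        return 1;          /* 480i-style canned timings */
    return 2;              /* 576i-style canned timings */
}

int main(void)
{
    printf("%d\n", need_tv_timings(0, TV_STD_NTSC));  /* 0: not a TV output */
    printf("%d\n", need_tv_timings(1, TV_STD_NTSC));  /* 1 */
    printf("%d\n", need_tv_timings(1, TV_STD_PAL));   /* 2 */
    return 0;
}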
-diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
-new file mode 100644
-index 0000000..d4e6e6e
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/avivod.h
-@@ -0,0 +1,60 @@
-+/*
-+ * Copyright 2009 Advanced Micro Devices, Inc.
-+ * Copyright 2009 Red Hat Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#ifndef AVIVOD_H
-+#define AVIVOD_H
-+
-+
-+#define D1CRTC_CONTROL 0x6080
-+#define CRTC_EN (1 << 0)
-+#define D1CRTC_UPDATE_LOCK 0x60E8
-+#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
-+#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
-+
-+#define D2CRTC_CONTROL 0x6880
-+#define D2CRTC_UPDATE_LOCK 0x68E8
-+#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
-+#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
-+
-+#define D1VGA_CONTROL 0x0330
-+#define DVGA_CONTROL_MODE_ENABLE (1 << 0)
-+#define DVGA_CONTROL_TIMING_SELECT (1 << 8)
-+#define DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
-+#define DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
-+#define DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
-+#define DVGA_CONTROL_ROTATE (1 << 24)
-+#define D2VGA_CONTROL 0x0338
-+
-+#define VGA_HDP_CONTROL 0x328
-+#define VGA_MEM_PAGE_SELECT_EN (1 << 0)
-+#define VGA_MEMORY_DISABLE (1 << 4)
-+#define VGA_RBBM_LOCK_DISABLE (1 << 8)
-+#define VGA_SOFT_RESET (1 << 16)
-+#define VGA_MEMORY_BASE_ADDRESS 0x0310
-+#define VGA_RENDER_CONTROL 0x0300
-+#define VGA_VSTATUS_CNTL_MASK 0x00030000
-+
-+#endif
-diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
-new file mode 100644
-index 0000000..fb211e5
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/mkregtable.c
-@@ -0,0 +1,720 @@
-+/* utility to create the register check tables
-+ * this includes inlined list.h safe for userspace.
-+ *
-+ * Copyright 2009 Jerome Glisse
-+ * Copyright 2009 Red Hat Inc.
-+ *
-+ * Authors:
-+ * Jerome Glisse
-+ * Dave Airlie
-+ */
-+
-+#include <sys/types.h>
-+#include <stdlib.h>
-+#include <string.h>
-+#include <stdio.h>
-+#include <regex.h>
-+#include <libgen.h>
-+
-+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-+/**
-+ * container_of - cast a member of a structure out to the containing structure
-+ * @ptr: the pointer to the member.
-+ * @type: the type of the container struct this is embedded in.
-+ * @member: the name of the member within the struct.
-+ *
-+ */
-+#define container_of(ptr, type, member) ({ \
-+ const typeof(((type *)0)->member)*__mptr = (ptr); \
-+ (type *)((char *)__mptr - offsetof(type, member)); })
-+
-+/*
-+ * Simple doubly linked list implementation.
-+ *
-+ * Some of the internal functions ("__xxx") are useful when
-+ * manipulating whole lists rather than single entries, as
-+ * sometimes we already know the next/prev entries and we can
-+ * generate better code by using them directly rather than
-+ * using the generic single-entry routines.
-+ */
-+
-+struct list_head {
-+ struct list_head *next, *prev;
-+};
-+
-+#define LIST_HEAD_INIT(name) { &(name), &(name) }
-+
-+#define LIST_HEAD(name) \
-+ struct list_head name = LIST_HEAD_INIT(name)
-+
-+static inline void INIT_LIST_HEAD(struct list_head *list)
-+{
-+ list->next = list;
-+ list->prev = list;
-+}
-+
-+/*
-+ * Insert a new entry between two known consecutive entries.
-+ *
-+ * This is only for internal list manipulation where we know
-+ * the prev/next entries already!
-+ */
-+#ifndef CONFIG_DEBUG_LIST
-+static inline void __list_add(struct list_head *new,
-+ struct list_head *prev, struct list_head *next)
-+{
-+ next->prev = new;
-+ new->next = next;
-+ new->prev = prev;
-+ prev->next = new;
-+}
-+#else
-+extern void __list_add(struct list_head *new,
-+ struct list_head *prev, struct list_head *next);
-+#endif
-+
-+/**
-+ * list_add - add a new entry
-+ * @new: new entry to be added
-+ * @head: list head to add it after
-+ *
-+ * Insert a new entry after the specified head.
-+ * This is good for implementing stacks.
-+ */
-+static inline void list_add(struct list_head *new, struct list_head *head)
-+{
-+ __list_add(new, head, head->next);
-+}
-+
-+/**
-+ * list_add_tail - add a new entry
-+ * @new: new entry to be added
-+ * @head: list head to add it before
-+ *
-+ * Insert a new entry before the specified head.
-+ * This is useful for implementing queues.
-+ */
-+static inline void list_add_tail(struct list_head *new, struct list_head *head)
-+{
-+ __list_add(new, head->prev, head);
-+}
-+
-+/*
-+ * Delete a list entry by making the prev/next entries
-+ * point to each other.
-+ *
-+ * This is only for internal list manipulation where we know
-+ * the prev/next entries already!
-+ */
-+static inline void __list_del(struct list_head *prev, struct list_head *next)
-+{
-+ next->prev = prev;
-+ prev->next = next;
-+}
-+
-+/**
-+ * list_del - deletes entry from list.
-+ * @entry: the element to delete from the list.
-+ * Note: list_empty() on entry does not return true after this, the entry is
-+ * in an undefined state.
-+ */
-+#ifndef CONFIG_DEBUG_LIST
-+static inline void list_del(struct list_head *entry)
-+{
-+ __list_del(entry->prev, entry->next);
-+ entry->next = (void *)0xDEADBEEF;
-+ entry->prev = (void *)0xBEEFDEAD;
-+}
-+#else
-+extern void list_del(struct list_head *entry);
-+#endif
-+
-+/**
-+ * list_replace - replace old entry by new one
-+ * @old : the element to be replaced
-+ * @new : the new element to insert
-+ *
-+ * If @old was empty, it will be overwritten.
-+ */
-+static inline void list_replace(struct list_head *old, struct list_head *new)
-+{
-+ new->next = old->next;
-+ new->next->prev = new;
-+ new->prev = old->prev;
-+ new->prev->next = new;
-+}
-+
-+static inline void list_replace_init(struct list_head *old,
-+ struct list_head *new)
-+{
-+ list_replace(old, new);
-+ INIT_LIST_HEAD(old);
-+}
-+
-+/**
-+ * list_del_init - deletes entry from list and reinitialize it.
-+ * @entry: the element to delete from the list.
-+ */
-+static inline void list_del_init(struct list_head *entry)
-+{
-+ __list_del(entry->prev, entry->next);
-+ INIT_LIST_HEAD(entry);
-+}
-+
-+/**
-+ * list_move - delete from one list and add as another's head
-+ * @list: the entry to move
-+ * @head: the head that will precede our entry
-+ */
-+static inline void list_move(struct list_head *list, struct list_head *head)
-+{
-+ __list_del(list->prev, list->next);
-+ list_add(list, head);
-+}
-+
-+/**
-+ * list_move_tail - delete from one list and add as another's tail
-+ * @list: the entry to move
-+ * @head: the head that will follow our entry
-+ */
-+static inline void list_move_tail(struct list_head *list,
-+ struct list_head *head)
-+{
-+ __list_del(list->prev, list->next);
-+ list_add_tail(list, head);
-+}
-+
-+/**
-+ * list_is_last - tests whether @list is the last entry in list @head
-+ * @list: the entry to test
-+ * @head: the head of the list
-+ */
-+static inline int list_is_last(const struct list_head *list,
-+ const struct list_head *head)
-+{
-+ return list->next == head;
-+}
-+
-+/**
-+ * list_empty - tests whether a list is empty
-+ * @head: the list to test.
-+ */
-+static inline int list_empty(const struct list_head *head)
-+{
-+ return head->next == head;
-+}
-+
-+/**
-+ * list_empty_careful - tests whether a list is empty and not being modified
-+ * @head: the list to test
-+ *
-+ * Description:
-+ * tests whether a list is empty _and_ checks that no other CPU might be
-+ * in the process of modifying either member (next or prev)
-+ *
-+ * NOTE: using list_empty_careful() without synchronization
-+ * can only be safe if the only activity that can happen
-+ * to the list entry is list_del_init(). Eg. it cannot be used
-+ * if another CPU could re-list_add() it.
-+ */
-+static inline int list_empty_careful(const struct list_head *head)
-+{
-+ struct list_head *next = head->next;
-+ return (next == head) && (next == head->prev);
-+}
-+
-+/**
-+ * list_is_singular - tests whether a list has just one entry.
-+ * @head: the list to test.
-+ */
-+static inline int list_is_singular(const struct list_head *head)
-+{
-+ return !list_empty(head) && (head->next == head->prev);
-+}
-+
-+static inline void __list_cut_position(struct list_head *list,
-+ struct list_head *head,
-+ struct list_head *entry)
-+{
-+ struct list_head *new_first = entry->next;
-+ list->next = head->next;
-+ list->next->prev = list;
-+ list->prev = entry;
-+ entry->next = list;
-+ head->next = new_first;
-+ new_first->prev = head;
-+}
-+
-+/**
-+ * list_cut_position - cut a list into two
-+ * @list: a new list to add all removed entries
-+ * @head: a list with entries
-+ * @entry: an entry within @head; it may be the head itself,
-+ * in which case the list is not cut
-+ *
-+ * This helper moves the initial part of @head, up to and
-+ * including @entry, from @head to @list. You should
-+ * pass on @entry an element you know is on @head. @list
-+ * should be an empty list or a list you do not care about
-+ * losing its data.
-+ *
-+ */
-+static inline void list_cut_position(struct list_head *list,
-+ struct list_head *head,
-+ struct list_head *entry)
-+{
-+ if (list_empty(head))
-+ return;
-+ if (list_is_singular(head) && (head->next != entry && head != entry))
-+ return;
-+ if (entry == head)
-+ INIT_LIST_HEAD(list);
-+ else
-+ __list_cut_position(list, head, entry);
-+}
-+
-+static inline void __list_splice(const struct list_head *list,
-+ struct list_head *prev, struct list_head *next)
-+{
-+ struct list_head *first = list->next;
-+ struct list_head *last = list->prev;
-+
-+ first->prev = prev;
-+ prev->next = first;
-+
-+ last->next = next;
-+ next->prev = last;
-+}
-+
-+/**
-+ * list_splice - join two lists, this is designed for stacks
-+ * @list: the new list to add.
-+ * @head: the place to add it in the first list.
-+ */
-+static inline void list_splice(const struct list_head *list,
-+ struct list_head *head)
-+{
-+ if (!list_empty(list))
-+ __list_splice(list, head, head->next);
-+}
-+
-+/**
-+ * list_splice_tail - join two lists, each list being a queue
-+ * @list: the new list to add.
-+ * @head: the place to add it in the first list.
-+ */
-+static inline void list_splice_tail(struct list_head *list,
-+ struct list_head *head)
-+{
-+ if (!list_empty(list))
-+ __list_splice(list, head->prev, head);
-+}
-+
-+/**
-+ * list_splice_init - join two lists and reinitialise the emptied list.
-+ * @list: the new list to add.
-+ * @head: the place to add it in the first list.
-+ *
-+ * The list at @list is reinitialised
-+ */
-+static inline void list_splice_init(struct list_head *list,
-+ struct list_head *head)
-+{
-+ if (!list_empty(list)) {
-+ __list_splice(list, head, head->next);
-+ INIT_LIST_HEAD(list);
-+ }
-+}
-+
-+/**
-+ * list_splice_tail_init - join two lists and reinitialise the emptied list
-+ * @list: the new list to add.
-+ * @head: the place to add it in the first list.
-+ *
-+ * Each of the lists is a queue.
-+ * The list at @list is reinitialised
-+ */
-+static inline void list_splice_tail_init(struct list_head *list,
-+ struct list_head *head)
-+{
-+ if (!list_empty(list)) {
-+ __list_splice(list, head->prev, head);
-+ INIT_LIST_HEAD(list);
-+ }
-+}
-+
-+/**
-+ * list_entry - get the struct for this entry
-+ * @ptr: the &struct list_head pointer.
-+ * @type: the type of the struct this is embedded in.
-+ * @member: the name of the list_struct within the struct.
-+ */
-+#define list_entry(ptr, type, member) \
-+ container_of(ptr, type, member)
-+
-+/**
-+ * list_first_entry - get the first element from a list
-+ * @ptr: the list head to take the element from.
-+ * @type: the type of the struct this is embedded in.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Note that the list is expected to be non-empty.
-+ */
-+#define list_first_entry(ptr, type, member) \
-+ list_entry((ptr)->next, type, member)
-+
-+/**
-+ * list_for_each - iterate over a list
-+ * @pos: the &struct list_head to use as a loop cursor.
-+ * @head: the head for your list.
-+ */
-+#define list_for_each(pos, head) \
-+ for (pos = (head)->next; prefetch(pos->next), pos != (head); \
-+ pos = pos->next)
-+
-+/**
-+ * __list_for_each - iterate over a list
-+ * @pos: the &struct list_head to use as a loop cursor.
-+ * @head: the head for your list.
-+ *
-+ * This variant differs from list_for_each() in that it's the
-+ * simplest possible list iteration code, no prefetching is done.
-+ * Use this for code that knows the list to be very short (empty
-+ * or 1 entry) most of the time.
-+ */
-+#define __list_for_each(pos, head) \
-+ for (pos = (head)->next; pos != (head); pos = pos->next)
-+
-+/**
-+ * list_for_each_prev - iterate over a list backwards
-+ * @pos: the &struct list_head to use as a loop cursor.
-+ * @head: the head for your list.
-+ */
-+#define list_for_each_prev(pos, head) \
-+ for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
-+ pos = pos->prev)
-+
-+/**
-+ * list_for_each_safe - iterate over a list safe against removal of list entry
-+ * @pos: the &struct list_head to use as a loop cursor.
-+ * @n: another &struct list_head to use as temporary storage
-+ * @head: the head for your list.
-+ */
-+#define list_for_each_safe(pos, n, head) \
-+ for (pos = (head)->next, n = pos->next; pos != (head); \
-+ pos = n, n = pos->next)
-+
-+/**
-+ * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
-+ * @pos: the &struct list_head to use as a loop cursor.
-+ * @n: another &struct list_head to use as temporary storage
-+ * @head: the head for your list.
-+ */
-+#define list_for_each_prev_safe(pos, n, head) \
-+ for (pos = (head)->prev, n = pos->prev; \
-+ prefetch(pos->prev), pos != (head); \
-+ pos = n, n = pos->prev)
-+
-+/**
-+ * list_for_each_entry - iterate over list of given type
-+ * @pos: the type * to use as a loop cursor.
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ */
-+#define list_for_each_entry(pos, head, member) \
-+ for (pos = list_entry((head)->next, typeof(*pos), member); \
-+ &pos->member != (head); \
-+ pos = list_entry(pos->member.next, typeof(*pos), member))
-+
-+/**
-+ * list_for_each_entry_reverse - iterate backwards over list of given type.
-+ * @pos: the type * to use as a loop cursor.
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ */
-+#define list_for_each_entry_reverse(pos, head, member) \
-+ for (pos = list_entry((head)->prev, typeof(*pos), member); \
-+ prefetch(pos->member.prev), &pos->member != (head); \
-+ pos = list_entry(pos->member.prev, typeof(*pos), member))
-+
-+/**
-+ * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
-+ * @pos: the type * to use as a start point
-+ * @head: the head of the list
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
-+ */
-+#define list_prepare_entry(pos, head, member) \
-+ ((pos) ? : list_entry(head, typeof(*pos), member))
-+
-+/**
-+ * list_for_each_entry_continue - continue iteration over list of given type
-+ * @pos: the type * to use as a loop cursor.
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Continue to iterate over list of given type, continuing after
-+ * the current position.
-+ */
-+#define list_for_each_entry_continue(pos, head, member) \
-+ for (pos = list_entry(pos->member.next, typeof(*pos), member); \
-+ prefetch(pos->member.next), &pos->member != (head); \
-+ pos = list_entry(pos->member.next, typeof(*pos), member))
-+
-+/**
-+ * list_for_each_entry_continue_reverse - iterate backwards from the given point
-+ * @pos: the type * to use as a loop cursor.
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Start to iterate over list of given type backwards, continuing after
-+ * the current position.
-+ */
-+#define list_for_each_entry_continue_reverse(pos, head, member) \
-+ for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
-+ prefetch(pos->member.prev), &pos->member != (head); \
-+ pos = list_entry(pos->member.prev, typeof(*pos), member))
-+
-+/**
-+ * list_for_each_entry_from - iterate over list of given type from the current point
-+ * @pos: the type * to use as a loop cursor.
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Iterate over list of given type, continuing from current position.
-+ */
-+#define list_for_each_entry_from(pos, head, member) \
-+ for (; prefetch(pos->member.next), &pos->member != (head); \
-+ pos = list_entry(pos->member.next, typeof(*pos), member))
-+
-+/**
-+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
-+ * @pos: the type * to use as a loop cursor.
-+ * @n: another type * to use as temporary storage
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ */
-+#define list_for_each_entry_safe(pos, n, head, member) \
-+ for (pos = list_entry((head)->next, typeof(*pos), member), \
-+ n = list_entry(pos->member.next, typeof(*pos), member); \
-+ &pos->member != (head); \
-+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
-+
-+/**
-+ * list_for_each_entry_safe_continue
-+ * @pos: the type * to use as a loop cursor.
-+ * @n: another type * to use as temporary storage
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Iterate over list of given type, continuing after current point,
-+ * safe against removal of list entry.
-+ */
-+#define list_for_each_entry_safe_continue(pos, n, head, member) \
-+ for (pos = list_entry(pos->member.next, typeof(*pos), member), \
-+ n = list_entry(pos->member.next, typeof(*pos), member); \
-+ &pos->member != (head); \
-+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
-+
-+/**
-+ * list_for_each_entry_safe_from
-+ * @pos: the type * to use as a loop cursor.
-+ * @n: another type * to use as temporary storage
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Iterate over list of given type from current point, safe against
-+ * removal of list entry.
-+ */
-+#define list_for_each_entry_safe_from(pos, n, head, member) \
-+ for (n = list_entry(pos->member.next, typeof(*pos), member); \
-+ &pos->member != (head); \
-+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
-+
-+/**
-+ * list_for_each_entry_safe_reverse
-+ * @pos: the type * to use as a loop cursor.
-+ * @n: another type * to use as temporary storage
-+ * @head: the head for your list.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Iterate backwards over list of given type, safe against removal
-+ * of list entry.
-+ */
-+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
-+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
-+ n = list_entry(pos->member.prev, typeof(*pos), member); \
-+ &pos->member != (head); \
-+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
-+
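The helpers above are a self-contained copy of the kernel's intrusive list API: each element embeds a struct list_head, and list_entry()/container_of() recover the enclosing structure from the embedded node, exactly as struct offset does below. A minimal usage sketch, assuming only the helpers in this file plus <stdio.h>; struct item and demo() are hypothetical and not part of the patch:

struct item {
	struct list_head node;	/* embedded node, like struct offset below */
	int value;
};

static void demo(void)
{
	struct list_head head;
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *it, *tmp;

	INIT_LIST_HEAD(&head);
	list_add_tail(&a.node, &head);		/* head -> a */
	list_add_tail(&b.node, &head);		/* head -> a -> b */

	/* iterate over the embedding structs, not the raw list nodes */
	list_for_each_entry(it, &head, node)
		printf("%d\n", it->value);

	/* removal-safe walk: tmp caches the successor before pos is unlinked */
	list_for_each_entry_safe(it, tmp, &head, node)
		list_del(&it->node);
}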
-+struct offset {
-+ struct list_head list;
-+ unsigned offset;
-+};
-+
-+struct table {
-+ struct list_head offsets;
-+ unsigned offset_max;
-+ unsigned nentry;
-+ unsigned *table;
-+ char *gpu_prefix;
-+};
-+
-+struct offset *offset_new(unsigned o)
-+{
-+ struct offset *offset;
-+
-+ offset = (struct offset *)malloc(sizeof(struct offset));
-+ if (offset) {
-+ INIT_LIST_HEAD(&offset->list);
-+ offset->offset = o;
-+ }
-+ return offset;
-+}
-+
-+void table_offset_add(struct table *t, struct offset *offset)
-+{
-+ list_add_tail(&offset->list, &t->offsets);
-+}
-+
-+void table_init(struct table *t)
-+{
-+ INIT_LIST_HEAD(&t->offsets);
-+ t->offset_max = 0;
-+ t->nentry = 0;
-+ t->table = NULL;
-+}
-+
-+void table_print(struct table *t)
-+{
-+ unsigned nlloop, i, j, n, c, id;
-+
-+ nlloop = (t->nentry + 3) / 4;
-+ c = t->nentry;
-+ printf("static const unsigned %s_reg_safe_bm[%d] = {\n", t->gpu_prefix,
-+ t->nentry);
-+ for (i = 0, id = 0; i < nlloop; i++) {
-+ n = 4;
-+ if (n > c)
-+ n = c;
-+ c -= n;
-+ for (j = 0; j < n; j++) {
-+ if (j == 0)
-+ printf("\t");
-+ else
-+ printf(" ");
-+ printf("0x%08X,", t->table[id++]);
-+ }
-+ printf("\n");
-+ }
-+ printf("};\n");
-+}
-+
-+int table_build(struct table *t)
-+{
-+ struct offset *offset;
-+ unsigned i, m;
-+
-+ t->nentry = ((t->offset_max >> 2) + 31) / 32;
-+ t->table = (unsigned *)malloc(sizeof(unsigned) * t->nentry);
-+ if (t->table == NULL)
-+ return -1;
-+ memset(t->table, 0xff, sizeof(unsigned) * t->nentry);
-+ list_for_each_entry(offset, &t->offsets, list) {
-+ i = (offset->offset >> 2) / 32;
-+ m = (offset->offset >> 2) & 31;
-+ m = 1 << m;
-+ t->table[i] ^= m;
-+ }
-+ return 0;
-+}
-+
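table_build() packs the parsed offsets into a bitmap with one bit per 32-bit register: nentry words cover register indices 0..(offset_max >> 2), every word starts out all ones, and the bit for each offset found in the auth file is cleared by the XOR (which assumes each register is listed at most once; a duplicate line would toggle its bit back on). A small sketch of how a consumer of the generated <gpu>_reg_safe_bm[] table might test one register offset, mirroring the same indexing; the helper name is hypothetical:

static int reg_listed_as_safe(const unsigned *bm, unsigned nentry,
			      unsigned reg_offset)
{
	unsigned word = (reg_offset >> 2) / 32;
	unsigned bit = 1u << ((reg_offset >> 2) & 31);

	if (word >= nentry)
		return 0;		/* outside the table: not listed */
	return !(bm[word] & bit);	/* cleared bit => listed in the auth file */
}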
-+static char gpu_name[10];
-+int parser_auth(struct table *t, const char *filename)
-+{
-+ FILE *file;
-+ regex_t mask_rex;
-+ regmatch_t match[4];
-+ char buf[1024];
-+ size_t end;
-+ int len;
-+ int done = 0;
-+ int r;
-+ unsigned o;
-+ struct offset *offset;
-+ char last_reg_s[10];
-+ int last_reg;
-+
-+ if (regcomp
-+ (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
-+ fprintf(stderr, "Failed to compile regular expression\n");
-+ return -1;
-+ }
-+ file = fopen(filename, "r");
-+ if (file == NULL) {
-+ fprintf(stderr, "Failed to open: %s\n", filename);
-+ return -1;
-+ }
-+ fseek(file, 0, SEEK_END);
-+ end = ftell(file);
-+ fseek(file, 0, SEEK_SET);
-+
-+ /* get header */
-+ if (fgets(buf, 1024, file) == NULL)
-+ return -1;
-+
-+ /* the first line carries the gpu name followed by
-+ * the last register offset */
-+ sscanf(buf, "%s %s", gpu_name, last_reg_s);
-+ t->gpu_prefix = gpu_name;
-+ last_reg = strtol(last_reg_s, NULL, 16);
-+
-+ do {
-+ if (fgets(buf, 1024, file) == NULL)
-+ return -1;
-+ len = strlen(buf);
-+ if (ftell(file) == end)
-+ done = 1;
-+ if (len) {
-+ r = regexec(&mask_rex, buf, 4, match, 0);
-+ if (r == REG_NOMATCH) {
-+ } else if (r) {
-+ fprintf(stderr,
-+ "Error matching regular expression %d in %s\n",
-+ r, filename);
-+ return -1;
-+ } else {
-+ buf[match[0].rm_eo] = 0;
-+ buf[match[1].rm_eo] = 0;
-+ buf[match[2].rm_eo] = 0;
-+ o = strtol(&buf[match[1].rm_so], NULL, 16);
-+ offset = offset_new(o);
-+ table_offset_add(t, offset);
-+ if (o > t->offset_max)
-+ t->offset_max = o;
-+ }
-+ }
-+ } while (!done);
-+ fclose(file);
-+ if (t->offset_max < last_reg)
-+ t->offset_max = last_reg;
-+ return table_build(t);
-+}
-+
-+int main(int argc, char *argv[])
-+{
-+ struct table t;
-+
-+ if (argc != 2) {
-+ fprintf(stderr, "Usage: %s <authfile>\n", argv[0]);
-+ exit(1);
-+ }
-+ table_init(&t);
-+ if (parser_auth(&t, argv[1])) {
-+ fprintf(stderr, "Failed to parse file %s\n", argv[1]);
-+ return -1;
-+ }
-+ table_print(&t);
-+ return 0;
-+}
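parser_auth() expects a plain-text auth file whose first line carries the gpu prefix and the highest register offset, followed by one "0xOFFSET NAME" pair per line (lines the regular expression does not match are silently skipped). A hypothetical input and invocation, with illustrative register names and an assumed tool name, producing the r100_reg_safe.h-style header that r100.c includes:

    r100 0x3294
    0x1434 RADEON_SRC_Y_X
    0x1438 RADEON_DST_Y_X
    0x1600 RADEON_DST_LINE_START

    mkregtable r100.auth > r100_reg_safe.h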
-diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
-index 68e728e..5708c07 100644
---- a/drivers/gpu/drm/radeon/r100.c
-+++ b/drivers/gpu/drm/radeon/r100.c
-@@ -29,15 +29,35 @@
- #include "drmP.h"
- #include "drm.h"
- #include "radeon_drm.h"
--#include "radeon_microcode.h"
- #include "radeon_reg.h"
- #include "radeon.h"
-+#include "r100d.h"
-+
-+#include <linux/firmware.h>
-+#include <linux/platform_device.h>
-+
-+#include "r100_reg_safe.h"
-+#include "rn50_reg_safe.h"
-+
-+/* Firmware Names */
-+#define FIRMWARE_R100 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R200 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R300 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R420 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_RS690 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_RS600 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R520 "/*(DEBLOBBED)*/"
-+
-+/*(DEBLOBBED)*/
-+
-+#include "r100_track.h"
-
- /* This files gather functions specifics to:
- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
- *
- * Some of these functions might be used by newer ASICs.
- */
-+int r200_init(struct radeon_device *rdev);
- void r100_hdp_reset(struct radeon_device *rdev);
- void r100_gpu_init(struct radeon_device *rdev);
- int r100_gui_wait_for_idle(struct radeon_device *rdev);
-@@ -367,9 +393,9 @@ int r100_wb_init(struct radeon_device *rdev)
- return r;
- }
- }
-- WREG32(0x774, rdev->wb.gpu_addr);
-- WREG32(0x70C, rdev->wb.gpu_addr + 1024);
-- WREG32(0x770, 0xff);
-+ WREG32(RADEON_SCRATCH_ADDR, rdev->wb.gpu_addr);
-+ WREG32(RADEON_CP_RB_RPTR_ADDR, rdev->wb.gpu_addr + 1024);
-+ WREG32(RADEON_SCRATCH_UMSK, 0xff);
- return 0;
- }
-
-@@ -478,12 +504,100 @@ void r100_ring_start(struct radeon_device *rdev)
- radeon_ring_unlock_commit(rdev);
- }
-
--#define r100_cp_load_microcode(rdev) \
-- do { \
-- DRM_ERROR("Missing Free microcode!\n"); \
-- return -EINVAL; \
-- } while (0)
--/*(DEBLOBBED)*/
-+
-+/* Load the microcode for the CP */
-+static int r100_cp_init_microcode(struct radeon_device *rdev)
-+{
-+ struct platform_device *pdev;
-+ const char *fw_name = NULL;
-+ int err;
-+
-+ DRM_DEBUG("\n");
-+
-+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
-+ err = IS_ERR(pdev);
-+ if (err) {
-+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
-+ return -EINVAL;
-+ }
-+ if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
-+ (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
-+ (rdev->family == CHIP_RS200)) {
-+ DRM_INFO("Loading R100 Microcode\n");
-+ fw_name = FIRMWARE_R100;
-+ } else if ((rdev->family == CHIP_R200) ||
-+ (rdev->family == CHIP_RV250) ||
-+ (rdev->family == CHIP_RV280) ||
-+ (rdev->family == CHIP_RS300)) {
-+ DRM_INFO("Loading R200 Microcode\n");
-+ fw_name = FIRMWARE_R200;
-+ } else if ((rdev->family == CHIP_R300) ||
-+ (rdev->family == CHIP_R350) ||
-+ (rdev->family == CHIP_RV350) ||
-+ (rdev->family == CHIP_RV380) ||
-+ (rdev->family == CHIP_RS400) ||
-+ (rdev->family == CHIP_RS480)) {
-+ DRM_INFO("Loading R300 Microcode\n");
-+ fw_name = FIRMWARE_R300;
-+ } else if ((rdev->family == CHIP_R420) ||
-+ (rdev->family == CHIP_R423) ||
-+ (rdev->family == CHIP_RV410)) {
-+ DRM_INFO("Loading R400 Microcode\n");
-+ fw_name = FIRMWARE_R420;
-+ } else if ((rdev->family == CHIP_RS690) ||
-+ (rdev->family == CHIP_RS740)) {
-+ DRM_INFO("Loading RS690/RS740 Microcode\n");
-+ fw_name = FIRMWARE_RS690;
-+ } else if (rdev->family == CHIP_RS600) {
-+ DRM_INFO("Loading RS600 Microcode\n");
-+ fw_name = FIRMWARE_RS600;
-+ } else if ((rdev->family == CHIP_RV515) ||
-+ (rdev->family == CHIP_R520) ||
-+ (rdev->family == CHIP_RV530) ||
-+ (rdev->family == CHIP_R580) ||
-+ (rdev->family == CHIP_RV560) ||
-+ (rdev->family == CHIP_RV570)) {
-+ DRM_INFO("Loading R500 Microcode\n");
-+ fw_name = FIRMWARE_R520;
-+ }
-+
-+ err = reject_firmware(&rdev->me_fw, fw_name, &pdev->dev);
-+ platform_device_unregister(pdev);
-+ if (err) {
-+ printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
-+ fw_name);
-+ } else if (rdev->me_fw->size % 8) {
-+ printk(KERN_ERR
-+ "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
-+ rdev->me_fw->size, fw_name);
-+ err = -EINVAL;
-+ release_firmware(rdev->me_fw);
-+ rdev->me_fw = NULL;
-+ }
-+ return err;
-+}
-+static void r100_cp_load_microcode(struct radeon_device *rdev)
-+{
-+ const __be32 *fw_data;
-+ int i, size;
-+
-+ if (r100_gui_wait_for_idle(rdev)) {
-+ printk(KERN_WARNING "Failed to wait for GUI idle while "
-+ "programming pipes. Bad things might happen.\n");
-+ }
-+
-+ if (rdev->me_fw) {
-+ size = rdev->me_fw->size / 4;
-+ fw_data = (const __be32 *)&rdev->me_fw->data[0];
-+ WREG32(RADEON_CP_ME_RAM_ADDR, 0);
-+ for (i = 0; i < size; i += 2) {
-+ WREG32(RADEON_CP_ME_RAM_DATAH,
-+ be32_to_cpup(&fw_data[i]));
-+ WREG32(RADEON_CP_ME_RAM_DATAL,
-+ be32_to_cpup(&fw_data[i + 1]));
-+ }
-+ }
-+}
-
- int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
- {
-@@ -585,6 +633,15 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
- } else {
- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
- }
-+
-+ if (!rdev->me_fw) {
-+ r = r100_cp_init_microcode(rdev);
-+ if (r) {
-+ DRM_ERROR("Failed to load firmware!\n");
-+ return r;
-+ }
-+ }
-+
- /* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
- ring_size = (1 << (rb_bufsz + 1)) * 4;
-@@ -710,6 +767,12 @@ int r100_cp_reset(struct radeon_device *rdev)
- return -1;
- }
-
-+void r100_cp_commit(struct radeon_device *rdev)
-+{
-+ WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-+ (void)RREG32(RADEON_CP_RB_WPTR);
-+}
-+
-
- /*
- * CS functions
-@@ -968,147 +1031,356 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- return 0;
- }
-
-+static int r100_get_vtx_size(uint32_t vtx_fmt)
-+{
-+ int vtx_size;
-+ vtx_size = 2;
-+ /* ordered according to bits in spec */
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
-+ vtx_size += 3;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
-+ vtx_size += 3;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
-+ vtx_size += 2;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
-+ vtx_size += 2;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
-+ vtx_size += 2;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
-+ vtx_size += 2;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
-+ vtx_size++;
-+ /* blend weight */
-+ if (vtx_fmt & (0x7 << 15))
-+ vtx_size += (vtx_fmt >> 15) & 0x7;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
-+ vtx_size += 3;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
-+ vtx_size += 2;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
-+ vtx_size++;
-+ if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
-+ vtx_size++;
-+ return vtx_size;
-+}
-+
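A quick worked example of the accounting in r100_get_vtx_size(): a format with RADEON_SE_VTX_FMT_W0, RADEON_SE_VTX_FMT_FPCOLOR and RADEON_SE_VTX_FMT_ST0 set starts from the base size of 2 dwords (X/Y), then adds 1 for W0, 3 for the floating-point color and 2 for the first texture-coordinate pair, for a vertex size of 8 dwords; this per-vertex size feeds the draw-size checks in r100_cs_track_check() below.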
- static int r100_packet0_check(struct radeon_cs_parser *p,
-- struct radeon_cs_packet *pkt)
-+ struct radeon_cs_packet *pkt,
-+ unsigned idx, unsigned reg)
- {
- struct radeon_cs_chunk *ib_chunk;
- struct radeon_cs_reloc *reloc;
-+ struct r100_cs_track *track;
- volatile uint32_t *ib;
- uint32_t tmp;
-- unsigned reg;
-- unsigned i;
-- unsigned idx;
-- bool onereg;
- int r;
-+ int i, face;
- u32 tile_flags = 0;
-
- ib = p->ib->ptr;
- ib_chunk = &p->chunks[p->chunk_ib_idx];
-- idx = pkt->idx + 1;
-- reg = pkt->reg;
-- onereg = false;
-- if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
-- onereg = true;
-- }
-- for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
-- switch (reg) {
-- case RADEON_CRTC_GUI_TRIG_VLINE:
-- r = r100_cs_packet_parse_vline(p);
-- if (r) {
-- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-- idx, reg);
-- r100_cs_dump_packet(p, pkt);
-- return r;
-- }
-- break;
-+ track = (struct r100_cs_track *)p->track;
-+
-+ switch (reg) {
-+ case RADEON_CRTC_GUI_TRIG_VLINE:
-+ r = r100_cs_packet_parse_vline(p);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ break;
- /* FIXME: only allow PACKET3 blit? easier to check for out of
- * range access */
-- case RADEON_DST_PITCH_OFFSET:
-- case RADEON_SRC_PITCH_OFFSET:
-- r = r100_cs_packet_next_reloc(p, &reloc);
-- if (r) {
-- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-- idx, reg);
-- r100_cs_dump_packet(p, pkt);
-- return r;
-- }
-- tmp = ib_chunk->kdata[idx] & 0x003fffff;
-- tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
--
-- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-- tile_flags |= RADEON_DST_TILE_MACRO;
-- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-- if (reg == RADEON_SRC_PITCH_OFFSET) {
-- DRM_ERROR("Cannot src blit from microtiled surface\n");
-- r100_cs_dump_packet(p, pkt);
-- return -EINVAL;
-- }
-- tile_flags |= RADEON_DST_TILE_MICRO;
-- }
-+ case RADEON_DST_PITCH_OFFSET:
-+ case RADEON_SRC_PITCH_OFFSET:
-+ r = r100_reloc_pitch_offset(p, pkt, idx, reg);
-+ if (r)
-+ return r;
-+ break;
-+ case RADEON_RB3D_DEPTHOFFSET:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->zb.robj = reloc->robj;
-+ track->zb.offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ break;
-+ case RADEON_RB3D_COLOROFFSET:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->cb[0].robj = reloc->robj;
-+ track->cb[0].offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ break;
-+ case RADEON_PP_TXOFFSET_0:
-+ case RADEON_PP_TXOFFSET_1:
-+ case RADEON_PP_TXOFFSET_2:
-+ i = (reg - RADEON_PP_TXOFFSET_0) / 24;
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ track->textures[i].robj = reloc->robj;
-+ break;
-+ case RADEON_PP_CUBIC_OFFSET_T0_0:
-+ case RADEON_PP_CUBIC_OFFSET_T0_1:
-+ case RADEON_PP_CUBIC_OFFSET_T0_2:
-+ case RADEON_PP_CUBIC_OFFSET_T0_3:
-+ case RADEON_PP_CUBIC_OFFSET_T0_4:
-+ i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ track->textures[0].cube_info[i].robj = reloc->robj;
-+ break;
-+ case RADEON_PP_CUBIC_OFFSET_T1_0:
-+ case RADEON_PP_CUBIC_OFFSET_T1_1:
-+ case RADEON_PP_CUBIC_OFFSET_T1_2:
-+ case RADEON_PP_CUBIC_OFFSET_T1_3:
-+ case RADEON_PP_CUBIC_OFFSET_T1_4:
-+ i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ track->textures[1].cube_info[i].robj = reloc->robj;
-+ break;
-+ case RADEON_PP_CUBIC_OFFSET_T2_0:
-+ case RADEON_PP_CUBIC_OFFSET_T2_1:
-+ case RADEON_PP_CUBIC_OFFSET_T2_2:
-+ case RADEON_PP_CUBIC_OFFSET_T2_3:
-+ case RADEON_PP_CUBIC_OFFSET_T2_4:
-+ i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ track->textures[2].cube_info[i].robj = reloc->robj;
-+ break;
-+ case RADEON_RE_WIDTH_HEIGHT:
-+ track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
-+ break;
-+ case RADEON_RB3D_COLORPITCH:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-
-- tmp |= tile_flags;
-- ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
-- break;
-- case RADEON_RB3D_DEPTHOFFSET:
-- case RADEON_RB3D_COLOROFFSET:
-- case R300_RB3D_COLOROFFSET0:
-- case R300_ZB_DEPTHOFFSET:
-- case R200_PP_TXOFFSET_0:
-- case R200_PP_TXOFFSET_1:
-- case R200_PP_TXOFFSET_2:
-- case R200_PP_TXOFFSET_3:
-- case R200_PP_TXOFFSET_4:
-- case R200_PP_TXOFFSET_5:
-- case RADEON_PP_TXOFFSET_0:
-- case RADEON_PP_TXOFFSET_1:
-- case RADEON_PP_TXOFFSET_2:
-- case R300_TX_OFFSET_0:
-- case R300_TX_OFFSET_0+4:
-- case R300_TX_OFFSET_0+8:
-- case R300_TX_OFFSET_0+12:
-- case R300_TX_OFFSET_0+16:
-- case R300_TX_OFFSET_0+20:
-- case R300_TX_OFFSET_0+24:
-- case R300_TX_OFFSET_0+28:
-- case R300_TX_OFFSET_0+32:
-- case R300_TX_OFFSET_0+36:
-- case R300_TX_OFFSET_0+40:
-- case R300_TX_OFFSET_0+44:
-- case R300_TX_OFFSET_0+48:
-- case R300_TX_OFFSET_0+52:
-- case R300_TX_OFFSET_0+56:
-- case R300_TX_OFFSET_0+60:
-- /* rn50 has no 3D engine so fail on any 3d setup */
-- if (ASIC_IS_RN50(p->rdev)) {
-- DRM_ERROR("attempt to use RN50 3D engine failed\n");
-- return -EINVAL;
-- }
-- r = r100_cs_packet_next_reloc(p, &reloc);
-- if (r) {
-- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-- idx, reg);
-- r100_cs_dump_packet(p, pkt);
-- return r;
-- }
-- ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-- break;
-- case R300_RB3D_COLORPITCH0:
-- case RADEON_RB3D_COLORPITCH:
-- r = r100_cs_packet_next_reloc(p, &reloc);
-- if (r) {
-- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-- idx, reg);
-- r100_cs_dump_packet(p, pkt);
-- return r;
-- }
-+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
-+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
-
-- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-- tile_flags |= RADEON_COLOR_TILE_ENABLE;
-- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-- tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
-+ tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
-+ tmp |= tile_flags;
-+ ib[idx] = tmp;
-
-- tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
-- tmp |= tile_flags;
-- ib[idx] = tmp;
-+ track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
-+ break;
-+ case RADEON_RB3D_DEPTHPITCH:
-+ track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
-+ break;
-+ case RADEON_RB3D_CNTL:
-+ switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
-+ case 7:
-+ case 8:
-+ case 9:
-+ case 11:
-+ case 12:
-+ track->cb[0].cpp = 1;
- break;
-- case RADEON_RB3D_ZPASS_ADDR:
-- r = r100_cs_packet_next_reloc(p, &reloc);
-- if (r) {
-- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-- idx, reg);
-- r100_cs_dump_packet(p, pkt);
-- return r;
-- }
-- ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ case 3:
-+ case 4:
-+ case 15:
-+ track->cb[0].cpp = 2;
-+ break;
-+ case 6:
-+ track->cb[0].cpp = 4;
-+ break;
-+ default:
-+ DRM_ERROR("Invalid color buffer format (%d) !\n",
-+ ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
-+ return -EINVAL;
-+ }
-+ track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
-+ break;
-+ case RADEON_RB3D_ZSTENCILCNTL:
-+ switch (ib_chunk->kdata[idx] & 0xf) {
-+ case 0:
-+ track->zb.cpp = 2;
-+ break;
-+ case 2:
-+ case 3:
-+ case 4:
-+ case 5:
-+ case 9:
-+ case 11:
-+ track->zb.cpp = 4;
- break;
- default:
-- /* FIXME: we don't want to allow anyothers packet */
- break;
- }
-- if (onereg) {
-- /* FIXME: forbid onereg write to register on relocate */
-+ break;
-+ case RADEON_RB3D_ZPASS_ADDR:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ break;
-+ case RADEON_PP_CNTL:
-+ {
-+ uint32_t temp = ib_chunk->kdata[idx] >> 4;
-+ for (i = 0; i < track->num_texture; i++)
-+ track->textures[i].enabled = !!(temp & (1 << i));
-+ }
-+ break;
-+ case RADEON_SE_VF_CNTL:
-+ track->vap_vf_cntl = ib_chunk->kdata[idx];
-+ break;
-+ case RADEON_SE_VTX_FMT:
-+ track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
-+ break;
-+ case RADEON_PP_TEX_SIZE_0:
-+ case RADEON_PP_TEX_SIZE_1:
-+ case RADEON_PP_TEX_SIZE_2:
-+ i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
-+ track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
-+ track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
-+ break;
-+ case RADEON_PP_TEX_PITCH_0:
-+ case RADEON_PP_TEX_PITCH_1:
-+ case RADEON_PP_TEX_PITCH_2:
-+ i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
-+ track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
-+ break;
-+ case RADEON_PP_TXFILTER_0:
-+ case RADEON_PP_TXFILTER_1:
-+ case RADEON_PP_TXFILTER_2:
-+ i = (reg - RADEON_PP_TXFILTER_0) / 24;
-+ track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
-+ >> RADEON_MAX_MIP_LEVEL_SHIFT);
-+ tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
-+ if (tmp == 2 || tmp == 6)
-+ track->textures[i].roundup_w = false;
-+ tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
-+ if (tmp == 2 || tmp == 6)
-+ track->textures[i].roundup_h = false;
-+ break;
-+ case RADEON_PP_TXFORMAT_0:
-+ case RADEON_PP_TXFORMAT_1:
-+ case RADEON_PP_TXFORMAT_2:
-+ i = (reg - RADEON_PP_TXFORMAT_0) / 24;
-+ if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
-+ track->textures[i].use_pitch = 1;
-+ } else {
-+ track->textures[i].use_pitch = 0;
-+ track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-+ track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
-+ }
-+ if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
-+ track->textures[i].tex_coord_type = 2;
-+ switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
-+ case RADEON_TXFORMAT_I8:
-+ case RADEON_TXFORMAT_RGB332:
-+ case RADEON_TXFORMAT_Y8:
-+ track->textures[i].cpp = 1;
-+ break;
-+ case RADEON_TXFORMAT_AI88:
-+ case RADEON_TXFORMAT_ARGB1555:
-+ case RADEON_TXFORMAT_RGB565:
-+ case RADEON_TXFORMAT_ARGB4444:
-+ case RADEON_TXFORMAT_VYUY422:
-+ case RADEON_TXFORMAT_YVYU422:
-+ case RADEON_TXFORMAT_DXT1:
-+ case RADEON_TXFORMAT_SHADOW16:
-+ case RADEON_TXFORMAT_LDUDV655:
-+ case RADEON_TXFORMAT_DUDV88:
-+ track->textures[i].cpp = 2;
- break;
-+ case RADEON_TXFORMAT_ARGB8888:
-+ case RADEON_TXFORMAT_RGBA8888:
-+ case RADEON_TXFORMAT_DXT23:
-+ case RADEON_TXFORMAT_DXT45:
-+ case RADEON_TXFORMAT_SHADOW32:
-+ case RADEON_TXFORMAT_LDUDUV8888:
-+ track->textures[i].cpp = 4;
-+ break;
-+ }
-+ track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
-+ track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
-+ break;
-+ case RADEON_PP_CUBIC_FACES_0:
-+ case RADEON_PP_CUBIC_FACES_1:
-+ case RADEON_PP_CUBIC_FACES_2:
-+ tmp = ib_chunk->kdata[idx];
-+ i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
-+ for (face = 0; face < 4; face++) {
-+ track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
-+ track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
- }
-+ break;
-+ default:
-+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
-+ reg, idx);
-+ return -EINVAL;
- }
- return 0;
- }
-@@ -1137,6 +1409,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- {
- struct radeon_cs_chunk *ib_chunk;
- struct radeon_cs_reloc *reloc;
-+ struct r100_cs_track *track;
- unsigned idx;
- unsigned i, c;
- volatile uint32_t *ib;
-@@ -1145,9 +1418,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- ib = p->ib->ptr;
- ib_chunk = &p->chunks[p->chunk_ib_idx];
- idx = pkt->idx + 1;
-+ track = (struct r100_cs_track *)p->track;
- switch (pkt->opcode) {
- case PACKET3_3D_LOAD_VBPNTR:
- c = ib_chunk->kdata[idx++];
-+ track->num_arrays = c;
- for (i = 0; i < (c - 1); i += 2, idx += 3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
-@@ -1157,6 +1432,9 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- return r;
- }
- ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-+ track->arrays[i + 0].robj = reloc->robj;
-+ track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-+ track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
-@@ -1165,6 +1443,9 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- return r;
- }
- ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
-+ track->arrays[i + 1].robj = reloc->robj;
-+ track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
-+ track->arrays[i + 1].esize &= 0x7F;
- }
- if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
-@@ -1175,6 +1456,9 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- return r;
- }
- ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-+ track->arrays[i + 0].robj = reloc->robj;
-+ track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-+ track->arrays[i + 0].esize &= 0x7F;
- }
- break;
- case PACKET3_INDX_BUFFER:
-@@ -1191,7 +1475,6 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- }
- break;
- case 0x23:
-- /* FIXME: cleanup */
- /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
-@@ -1200,18 +1483,71 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- return r;
- }
- ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ track->num_arrays = 1;
-+ track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
-+
-+ track->arrays[0].robj = reloc->robj;
-+ track->arrays[0].esize = track->vtx_size;
-+
-+ track->max_indx = ib_chunk->kdata[idx+1];
-+
-+ track->vap_vf_cntl = ib_chunk->kdata[idx+3];
-+ track->immd_dwords = pkt->count - 1;
-+ r = r100_cs_track_check(p->rdev, track);
-+ if (r)
-+ return r;
- break;
- case PACKET3_3D_DRAW_IMMD:
-+ if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
-+ DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
-+ return -EINVAL;
-+ }
-+ track->vap_vf_cntl = ib_chunk->kdata[idx+1];
-+ track->immd_dwords = pkt->count - 1;
-+ r = r100_cs_track_check(p->rdev, track);
-+ if (r)
-+ return r;
-+ break;
- /* triggers drawing using in-packet vertex data */
- case PACKET3_3D_DRAW_IMMD_2:
-+ if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
-+ DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
-+ return -EINVAL;
-+ }
-+ track->vap_vf_cntl = ib_chunk->kdata[idx];
-+ track->immd_dwords = pkt->count;
-+ r = r100_cs_track_check(p->rdev, track);
-+ if (r)
-+ return r;
-+ break;
- /* triggers drawing using in-packet vertex data */
- case PACKET3_3D_DRAW_VBUF_2:
-+ track->vap_vf_cntl = ib_chunk->kdata[idx];
-+ r = r100_cs_track_check(p->rdev, track);
-+ if (r)
-+ return r;
-+ break;
- /* triggers drawing of vertex buffers setup elsewhere */
- case PACKET3_3D_DRAW_INDX_2:
-+ track->vap_vf_cntl = ib_chunk->kdata[idx];
-+ r = r100_cs_track_check(p->rdev, track);
-+ if (r)
-+ return r;
-+ break;
- /* triggers drawing using indices to vertex buffer */
- case PACKET3_3D_DRAW_VBUF:
-+ track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
-+ r = r100_cs_track_check(p->rdev, track);
-+ if (r)
-+ return r;
-+ break;
- /* triggers drawing of vertex buffers setup elsewhere */
- case PACKET3_3D_DRAW_INDX:
-+ track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
-+ r = r100_cs_track_check(p->rdev, track);
-+ if (r)
-+ return r;
-+ break;
- /* triggers drawing using indices to vertex buffer */
- case PACKET3_NOP:
- break;
-@@ -1225,8 +1561,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
- int r100_cs_parse(struct radeon_cs_parser *p)
- {
- struct radeon_cs_packet pkt;
-+ struct r100_cs_track track;
- int r;
-
-+ r100_cs_track_clear(p->rdev, &track);
-+ p->track = &track;
- do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
- if (r) {
-@@ -1235,7 +1574,16 @@ int r100_cs_parse(struct radeon_cs_parser *p)
- p->idx += pkt.count + 2;
- switch (pkt.type) {
- case PACKET_TYPE0:
-- r = r100_packet0_check(p, &pkt);
-+ if (p->rdev->family >= CHIP_R200)
-+ r = r100_cs_parse_packet0(p, &pkt,
-+ p->rdev->config.r100.reg_safe_bm,
-+ p->rdev->config.r100.reg_safe_bm_size,
-+ &r200_packet0_check);
-+ else
-+ r = r100_cs_parse_packet0(p, &pkt,
-+ p->rdev->config.r100.reg_safe_bm,
-+ p->rdev->config.r100.reg_safe_bm_size,
-+ &r100_packet0_check);
- break;
- case PACKET_TYPE2:
- break;
-@@ -1634,6 +1982,15 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-
- int r100_init(struct radeon_device *rdev)
- {
-+ if (ASIC_IS_RN50(rdev)) {
-+ rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
-+ rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
-+ } else if (rdev->family < CHIP_R200) {
-+ rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
-+ rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
-+ } else {
-+ return r200_init(rdev);
-+ }
- return 0;
- }
-
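r100_init() above pairs each pre-R200 family with a generated bitmap (rn50 or r100) or hands the device off to r200_init(), and the parse path earlier passes that bitmap plus a per-family packet0 callback to r100_cs_parse_packet0(), whose body is not part of this hunk. A plausible sketch of the dispatch it implies, reusing the bit layout of the generated tables; everything here is an assumption, not the driver's actual code:

static int parse_packet0_sketch(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				const unsigned *safe_bm, unsigned bm_size,
				int (*check)(struct radeon_cs_parser *,
					     struct radeon_cs_packet *,
					     unsigned, unsigned))
{
	unsigned idx = pkt->idx + 1;
	unsigned reg = pkt->reg;
	unsigned i;
	int r;

	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		unsigned word = (reg >> 2) / 32;
		unsigned bit = 1u << ((reg >> 2) & 31);

		/* a cleared bit marks a register listed as safe by the
		 * auth file; everything else goes through the per-ASIC
		 * callback (r100_packet0_check or r200_packet0_check) */
		if (word >= bm_size || (safe_bm[word] & bit)) {
			r = check(p, pkt, idx, reg);
			if (r)
				return r;
		}
	}
	return 0;
}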
-@@ -2334,3 +2691,377 @@ void r100_bandwidth_update(struct radeon_device *rdev)
- (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
- }
- }
-+
-+static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
-+{
-+ DRM_ERROR("pitch %d\n", t->pitch);
-+ DRM_ERROR("width %d\n", t->width);
-+ DRM_ERROR("height %d\n", t->height);
-+ DRM_ERROR("num levels %d\n", t->num_levels);
-+ DRM_ERROR("depth %d\n", t->txdepth);
-+ DRM_ERROR("bpp %d\n", t->cpp);
-+ DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
-+ DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
-+ DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
-+}
-+
-+static int r100_cs_track_cube(struct radeon_device *rdev,
-+ struct r100_cs_track *track, unsigned idx)
-+{
-+ unsigned face, w, h;
-+ struct radeon_object *cube_robj;
-+ unsigned long size;
-+
-+ for (face = 0; face < 5; face++) {
-+ cube_robj = track->textures[idx].cube_info[face].robj;
-+ w = track->textures[idx].cube_info[face].width;
-+ h = track->textures[idx].cube_info[face].height;
-+
-+ size = w * h;
-+ size *= track->textures[idx].cpp;
-+
-+ size += track->textures[idx].cube_info[face].offset;
-+
-+ if (size > radeon_object_size(cube_robj)) {
-+ DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
-+ size, radeon_object_size(cube_robj));
-+ r100_cs_track_texture_print(&track->textures[idx]);
-+ return -1;
-+ }
-+ }
-+ return 0;
-+}
-+
-+static int r100_cs_track_texture_check(struct radeon_device *rdev,
-+ struct r100_cs_track *track)
-+{
-+ struct radeon_object *robj;
-+ unsigned long size;
-+ unsigned u, i, w, h;
-+ int ret;
-+
-+ for (u = 0; u < track->num_texture; u++) {
-+ if (!track->textures[u].enabled)
-+ continue;
-+ robj = track->textures[u].robj;
-+ if (robj == NULL) {
-+ DRM_ERROR("No texture bound to unit %u\n", u);
-+ return -EINVAL;
-+ }
-+ size = 0;
-+ for (i = 0; i <= track->textures[u].num_levels; i++) {
-+ if (track->textures[u].use_pitch) {
-+ if (rdev->family < CHIP_R300)
-+ w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
-+ else
-+ w = track->textures[u].pitch / (1 << i);
-+ } else {
-+ w = track->textures[u].width / (1 << i);
-+ if (rdev->family >= CHIP_RV515)
-+ w |= track->textures[u].width_11;
-+ if (track->textures[u].roundup_w)
-+ w = roundup_pow_of_two(w);
-+ }
-+ h = track->textures[u].height / (1 << i);
-+ if (rdev->family >= CHIP_RV515)
-+ h |= track->textures[u].height_11;
-+ if (track->textures[u].roundup_h)
-+ h = roundup_pow_of_two(h);
-+ size += w * h;
-+ }
-+ size *= track->textures[u].cpp;
-+ switch (track->textures[u].tex_coord_type) {
-+ case 0:
-+ break;
-+ case 1:
-+ size *= (1 << track->textures[u].txdepth);
-+ break;
-+ case 2:
-+ if (track->separate_cube) {
-+ ret = r100_cs_track_cube(rdev, track, u);
-+ if (ret)
-+ return ret;
-+ } else
-+ size *= 6;
-+ break;
-+ default:
-+ DRM_ERROR("Invalid texture coordinate type %u for unit "
-+ "%u\n", track->textures[u].tex_coord_type, u);
-+ return -EINVAL;
-+ }
-+ if (size > radeon_object_size(robj)) {
-+ DRM_ERROR("Texture of unit %u needs %lu bytes but is "
-+ "%lu\n", u, size, radeon_object_size(robj));
-+ r100_cs_track_texture_print(&track->textures[u]);
-+ return -EINVAL;
-+ }
-+ }
-+ return 0;
-+}
-+
-+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
-+{
-+ unsigned i;
-+ unsigned long size;
-+ unsigned prim_walk;
-+ unsigned nverts;
-+
-+ for (i = 0; i < track->num_cb; i++) {
-+ if (track->cb[i].robj == NULL) {
-+ DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
-+ return -EINVAL;
-+ }
-+ size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
-+ size += track->cb[i].offset;
-+ if (size > radeon_object_size(track->cb[i].robj)) {
-+ DRM_ERROR("[drm] Buffer too small for color buffer %d "
-+ "(need %lu have %lu) !\n", i, size,
-+ radeon_object_size(track->cb[i].robj));
-+ DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
-+ i, track->cb[i].pitch, track->cb[i].cpp,
-+ track->cb[i].offset, track->maxy);
-+ return -EINVAL;
-+ }
-+ }
-+ if (track->z_enabled) {
-+ if (track->zb.robj == NULL) {
-+ DRM_ERROR("[drm] No buffer for z buffer !\n");
-+ return -EINVAL;
-+ }
-+ size = track->zb.pitch * track->zb.cpp * track->maxy;
-+ size += track->zb.offset;
-+ if (size > radeon_object_size(track->zb.robj)) {
-+ DRM_ERROR("[drm] Buffer too small for z buffer "
-+ "(need %lu have %lu) !\n", size,
-+ radeon_object_size(track->zb.robj));
-+ DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
-+ track->zb.pitch, track->zb.cpp,
-+ track->zb.offset, track->maxy);
-+ return -EINVAL;
-+ }
-+ }
-+ prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
-+ nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
-+ switch (prim_walk) {
-+ case 1:
-+ for (i = 0; i < track->num_arrays; i++) {
-+ size = track->arrays[i].esize * track->max_indx * 4;
-+ if (track->arrays[i].robj == NULL) {
-+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
-+ "bound\n", prim_walk, i);
-+ return -EINVAL;
-+ }
-+ if (size > radeon_object_size(track->arrays[i].robj)) {
-+ DRM_ERROR("(PW %u) Vertex array %u needs %lu dwords "
-+ "but has %lu dwords\n", prim_walk, i,
-+ size >> 2,
-+ radeon_object_size(track->arrays[i].robj) >> 2);
-+ DRM_ERROR("Max indices %u\n", track->max_indx);
-+ return -EINVAL;
-+ }
-+ }
-+ break;
-+ case 2:
-+ for (i = 0; i < track->num_arrays; i++) {
-+ size = track->arrays[i].esize * (nverts - 1) * 4;
-+ if (track->arrays[i].robj == NULL) {
-+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
-+ "bound\n", prim_walk, i);
-+ return -EINVAL;
-+ }
-+ if (size > radeon_object_size(track->arrays[i].robj)) {
-+ DRM_ERROR("(PW %u) Vertex array %u needs %lu dwords "
-+ "but has %lu dwords\n", prim_walk, i, size >> 2,
-+ radeon_object_size(track->arrays[i].robj) >> 2);
-+ return -EINVAL;
-+ }
-+ }
-+ break;
-+ case 3:
-+ size = track->vtx_size * nverts;
-+ if (size != track->immd_dwords) {
-+ DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
-+ track->immd_dwords, size);
-+ DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
-+ nverts, track->vtx_size);
-+ return -EINVAL;
-+ }
-+ break;
-+ default:
-+ DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
-+ prim_walk);
-+ return -EINVAL;
-+ }
-+ return r100_cs_track_texture_check(rdev, track);
-+}
-+
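The checks in r100_cs_track_check() all reduce to the same bound: the bytes a draw may touch, derived from the tracked state, must fit inside the bound radeon_object. As a worked example, a color buffer tracked with pitch 1024 pixels, cpp 4 and maxy 768 at offset 0 needs 1024 * 4 * 768 = 3145728 bytes, and the command stream is rejected if radeon_object_size() of the buffer is smaller than that.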
-+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
-+{
-+ unsigned i, face;
-+
-+ if (rdev->family < CHIP_R300) {
-+ track->num_cb = 1;
-+ if (rdev->family <= CHIP_RS200)
-+ track->num_texture = 3;
-+ else
-+ track->num_texture = 6;
-+ track->maxy = 2048;
-+ track->separate_cube = 1;
-+ } else {
-+ track->num_cb = 4;
-+ track->num_texture = 16;
-+ track->maxy = 4096;
-+ track->separate_cube = 0;
-+ }
-+
-+ for (i = 0; i < track->num_cb; i++) {
-+ track->cb[i].robj = NULL;
-+ track->cb[i].pitch = 8192;
-+ track->cb[i].cpp = 16;
-+ track->cb[i].offset = 0;
-+ }
-+ track->z_enabled = true;
-+ track->zb.robj = NULL;
-+ track->zb.pitch = 8192;
-+ track->zb.cpp = 4;
-+ track->zb.offset = 0;
-+ track->vtx_size = 0x7F;
-+ track->immd_dwords = 0xFFFFFFFFUL;
-+ track->num_arrays = 11;
-+ track->max_indx = 0x00FFFFFFUL;
-+ for (i = 0; i < track->num_arrays; i++) {
-+ track->arrays[i].robj = NULL;
-+ track->arrays[i].esize = 0x7F;
-+ }
-+ for (i = 0; i < track->num_texture; i++) {
-+ track->textures[i].pitch = 16536;
-+ track->textures[i].width = 16536;
-+ track->textures[i].height = 16536;
-+ track->textures[i].width_11 = 1 << 11;
-+ track->textures[i].height_11 = 1 << 11;
-+ track->textures[i].num_levels = 12;
-+ if (rdev->family <= CHIP_RS200) {
-+ track->textures[i].tex_coord_type = 0;
-+ track->textures[i].txdepth = 0;
-+ } else {
-+ track->textures[i].txdepth = 16;
-+ track->textures[i].tex_coord_type = 1;
-+ }
-+ track->textures[i].cpp = 64;
-+ track->textures[i].robj = NULL;
-+ /* CS IB emission code makes sure texture units are disabled */
-+ track->textures[i].enabled = false;
-+ track->textures[i].roundup_w = true;
-+ track->textures[i].roundup_h = true;
-+ if (track->separate_cube)
-+ for (face = 0; face < 5; face++) {
-+ track->textures[i].cube_info[face].robj = NULL;
-+ track->textures[i].cube_info[face].width = 16536;
-+ track->textures[i].cube_info[face].height = 16536;
-+ track->textures[i].cube_info[face].offset = 0;
-+ }
-+ }
-+}
-+
-+int r100_ring_test(struct radeon_device *rdev)
-+{
-+ uint32_t scratch;
-+ uint32_t tmp = 0;
-+ unsigned i;
-+ int r;
-+
-+ r = radeon_scratch_get(rdev, &scratch);
-+ if (r) {
-+ DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
-+ return r;
-+ }
-+ WREG32(scratch, 0xCAFEDEAD);
-+ r = radeon_ring_lock(rdev, 2);
-+ if (r) {
-+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-+ radeon_scratch_free(rdev, scratch);
-+ return r;
-+ }
-+ radeon_ring_write(rdev, PACKET0(scratch, 0));
-+ radeon_ring_write(rdev, 0xDEADBEEF);
-+ radeon_ring_unlock_commit(rdev);
-+ for (i = 0; i < rdev->usec_timeout; i++) {
-+ tmp = RREG32(scratch);
-+ if (tmp == 0xDEADBEEF) {
-+ break;
-+ }
-+ DRM_UDELAY(1);
-+ }
-+ if (i < rdev->usec_timeout) {
-+ DRM_INFO("ring test succeeded in %d usecs\n", i);
-+ } else {
-+ DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
-+ scratch, tmp);
-+ r = -EINVAL;
-+ }
-+ radeon_scratch_free(rdev, scratch);
-+ return r;
-+}
-+
-+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-+{
-+ radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-+ radeon_ring_write(rdev, ib->gpu_addr);
-+ radeon_ring_write(rdev, ib->length_dw);
-+}
-+
-+int r100_ib_test(struct radeon_device *rdev)
-+{
-+ struct radeon_ib *ib;
-+ uint32_t scratch;
-+ uint32_t tmp = 0;
-+ unsigned i;
-+ int r;
-+
-+ r = radeon_scratch_get(rdev, &scratch);
-+ if (r) {
-+ DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
-+ return r;
-+ }
-+ WREG32(scratch, 0xCAFEDEAD);
-+ r = radeon_ib_get(rdev, &ib);
-+ if (r) {
-+ return r;
-+ }
-+ ib->ptr[0] = PACKET0(scratch, 0);
-+ ib->ptr[1] = 0xDEADBEEF;
-+ ib->ptr[2] = PACKET2(0);
-+ ib->ptr[3] = PACKET2(0);
-+ ib->ptr[4] = PACKET2(0);
-+ ib->ptr[5] = PACKET2(0);
-+ ib->ptr[6] = PACKET2(0);
-+ ib->ptr[7] = PACKET2(0);
-+ ib->length_dw = 8;
-+ r = radeon_ib_schedule(rdev, ib);
-+ if (r) {
-+ radeon_scratch_free(rdev, scratch);
-+ radeon_ib_free(rdev, &ib);
-+ return r;
-+ }
-+ r = radeon_fence_wait(ib->fence, false);
-+ if (r) {
-+ return r;
-+ }
-+ for (i = 0; i < rdev->usec_timeout; i++) {
-+ tmp = RREG32(scratch);
-+ if (tmp == 0xDEADBEEF) {
-+ break;
-+ }
-+ DRM_UDELAY(1);
-+ }
-+ if (i < rdev->usec_timeout) {
-+ DRM_INFO("ib test succeeded in %u usecs\n", i);
-+ } else {
-+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
-+ scratch, tmp);
-+ r = -EINVAL;
-+ }
-+ radeon_scratch_free(rdev, scratch);
-+ radeon_ib_free(rdev, &ib);
-+ return r;
-+}
-diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
-new file mode 100644
-index 0000000..70a82ed
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r100_track.h
-@@ -0,0 +1,124 @@
-+
-+#define R100_TRACK_MAX_TEXTURE 3
-+#define R200_TRACK_MAX_TEXTURE 6
-+#define R300_TRACK_MAX_TEXTURE 16
-+
-+#define R100_MAX_CB 1
-+#define R300_MAX_CB 4
-+
-+/*
-+ * CS functions
-+ */
-+struct r100_cs_track_cb {
-+ struct radeon_object *robj;
-+ unsigned pitch;
-+ unsigned cpp;
-+ unsigned offset;
-+};
-+
-+struct r100_cs_track_array {
-+ struct radeon_object *robj;
-+ unsigned esize;
-+};
-+
-+struct r100_cs_cube_info {
-+ struct radeon_object *robj;
-+ unsigned offset;
-+ unsigned width;
-+ unsigned height;
-+};
-+
-+struct r100_cs_track_texture {
-+ struct radeon_object *robj;
-+ struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
-+ unsigned pitch;
-+ unsigned width;
-+ unsigned height;
-+ unsigned num_levels;
-+ unsigned cpp;
-+ unsigned tex_coord_type;
-+ unsigned txdepth;
-+ unsigned width_11;
-+ unsigned height_11;
-+ bool use_pitch;
-+ bool enabled;
-+ bool roundup_w;
-+ bool roundup_h;
-+};
-+
-+struct r100_cs_track_limits {
-+ unsigned num_cb;
-+ unsigned num_texture;
-+ unsigned max_levels;
-+};
-+
-+struct r100_cs_track {
-+ struct radeon_device *rdev;
-+ unsigned num_cb;
-+ unsigned num_texture;
-+ unsigned maxy;
-+ unsigned vtx_size;
-+ unsigned vap_vf_cntl;
-+ unsigned immd_dwords;
-+ unsigned num_arrays;
-+ unsigned max_indx;
-+ struct r100_cs_track_array arrays[11];
-+ struct r100_cs_track_cb cb[R300_MAX_CB];
-+ struct r100_cs_track_cb zb;
-+ struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
-+ bool z_enabled;
-+ bool separate_cube;
-+
-+};
-+
-+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
-+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
-+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
-+ struct radeon_cs_reloc **cs_reloc);
-+void r100_cs_dump_packet(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt);
-+
-+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
-+
-+int r200_packet0_check(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt,
-+ unsigned idx, unsigned reg);
-+
-+static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt,
-+ unsigned idx,
-+ unsigned reg)
-+{
-+ int r;
-+ u32 tile_flags = 0;
-+ u32 tmp;
-+ struct radeon_cs_reloc *reloc;
-+ struct radeon_cs_chunk *ib_chunk;
-+
-+ ib_chunk = &p->chunks[p->chunk_ib_idx];
-+
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ tmp = ib_chunk->kdata[idx] & 0x003fffff;
-+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-+
-+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-+ tile_flags |= RADEON_DST_TILE_MACRO;
-+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-+ if (reg == RADEON_SRC_PITCH_OFFSET) {
-+ DRM_ERROR("Cannot src blit from microtiled surface\n");
-+ r100_cs_dump_packet(p, pkt);
-+ return -EINVAL;
-+ }
-+ tile_flags |= RADEON_DST_TILE_MICRO;
-+ }
-+
-+ tmp |= tile_flags;
-+ p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
-+ return 0;
-+}
-diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
-new file mode 100644
-index 0000000..6da7d92
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r100d.h
-@@ -0,0 +1,76 @@
-+/*
-+ * Copyright 2008 Advanced Micro Devices, Inc.
-+ * Copyright 2008 Red Hat Inc.
-+ * Copyright 2009 Jerome Glisse.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#ifndef __R100D_H__
-+#define __R100D_H__
-+
-+#define CP_PACKET0 0x00000000
-+#define PACKET0_BASE_INDEX_SHIFT 0
-+#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
-+#define PACKET0_COUNT_SHIFT 16
-+#define PACKET0_COUNT_MASK (0x3fff << 16)
-+#define CP_PACKET1 0x40000000
-+#define CP_PACKET2 0x80000000
-+#define PACKET2_PAD_SHIFT 0
-+#define PACKET2_PAD_MASK (0x3fffffff << 0)
-+#define CP_PACKET3 0xC0000000
-+#define PACKET3_IT_OPCODE_SHIFT 8
-+#define PACKET3_IT_OPCODE_MASK (0xff << 8)
-+#define PACKET3_COUNT_SHIFT 16
-+#define PACKET3_COUNT_MASK (0x3fff << 16)
-+/* PACKET3 op code */
-+#define PACKET3_NOP 0x10
-+#define PACKET3_3D_DRAW_VBUF 0x28
-+#define PACKET3_3D_DRAW_IMMD 0x29
-+#define PACKET3_3D_DRAW_INDX 0x2A
-+#define PACKET3_3D_LOAD_VBPNTR 0x2F
-+#define PACKET3_INDX_BUFFER 0x33
-+#define PACKET3_3D_DRAW_VBUF_2 0x34
-+#define PACKET3_3D_DRAW_IMMD_2 0x35
-+#define PACKET3_3D_DRAW_INDX_2 0x36
-+#define PACKET3_BITBLT_MULTI 0x9B
-+
-+#define PACKET0(reg, n) (CP_PACKET0 | \
-+ REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
-+ REG_SET(PACKET0_COUNT, (n)))
-+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-+#define PACKET3(op, n) (CP_PACKET3 | \
-+ REG_SET(PACKET3_IT_OPCODE, (op)) | \
-+ REG_SET(PACKET3_COUNT, (n)))
-+
-+#define PACKET_TYPE0 0
-+#define PACKET_TYPE1 1
-+#define PACKET_TYPE2 2
-+#define PACKET_TYPE3 3
-+
-+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-+
-+#endif
-diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
-new file mode 100644
-index 0000000..568c74b
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r200.c
-@@ -0,0 +1,456 @@
-+/*
-+ * Copyright 2008 Advanced Micro Devices, Inc.
-+ * Copyright 2008 Red Hat Inc.
-+ * Copyright 2009 Jerome Glisse.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#include "drmP.h"
-+#include "drm.h"
-+#include "radeon_drm.h"
-+#include "radeon_reg.h"
-+#include "radeon.h"
-+
-+#include "r200_reg_safe.h"
-+
-+#include "r100_track.h"
-+
-+static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
-+{
-+ int vtx_size, i;
-+ vtx_size = 2;
-+
-+ if (vtx_fmt_0 & R200_VTX_Z0)
-+ vtx_size++;
-+ if (vtx_fmt_0 & R200_VTX_W0)
-+ vtx_size++;
-+ /* blend weight */
-+ if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
-+ vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
-+ if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
-+ vtx_size++;
-+ if (vtx_fmt_0 & R200_VTX_N0)
-+ vtx_size += 3;
-+ if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
-+ vtx_size++;
-+ if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
-+ vtx_size++;
-+ if (vtx_fmt_0 & R200_VTX_SHININESS_0)
-+ vtx_size++;
-+ if (vtx_fmt_0 & R200_VTX_SHININESS_1)
-+ vtx_size++;
-+ for (i = 0; i < 8; i++) {
-+ int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
-+ switch (color_size) {
-+ case 0: break;
-+ case 1: vtx_size++; break;
-+ case 2: vtx_size += 3; break;
-+ case 3: vtx_size += 4; break;
-+ }
-+ }
-+ if (vtx_fmt_0 & R200_VTX_XY1)
-+ vtx_size += 2;
-+ if (vtx_fmt_0 & R200_VTX_Z1)
-+ vtx_size++;
-+ if (vtx_fmt_0 & R200_VTX_W1)
-+ vtx_size++;
-+ if (vtx_fmt_0 & R200_VTX_N1)
-+ vtx_size += 3;
-+ return vtx_size;
-+}
-+
-+static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
-+{
-+ int vtx_size, i, tex_size;
-+ vtx_size = 0;
-+ for (i = 0; i < 6; i++) {
-+ tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
-+ if (tex_size > 4)
-+ continue;
-+ vtx_size += tex_size;
-+ }
-+ return vtx_size;
-+}
-+
-+int r200_packet0_check(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt,
-+ unsigned idx, unsigned reg)
-+{
-+ struct radeon_cs_chunk *ib_chunk;
-+ struct radeon_cs_reloc *reloc;
-+ struct r100_cs_track *track;
-+ volatile uint32_t *ib;
-+ uint32_t tmp;
-+ int r;
-+ int i;
-+ int face;
-+ u32 tile_flags = 0;
-+
-+ ib = p->ib->ptr;
-+ ib_chunk = &p->chunks[p->chunk_ib_idx];
-+ track = (struct r100_cs_track *)p->track;
-+
-+ switch (reg) {
-+ case RADEON_CRTC_GUI_TRIG_VLINE:
-+ r = r100_cs_packet_parse_vline(p);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ break;
-+ /* FIXME: only allow PACKET3 blit? easier to check for out of
-+ * range access */
-+ case RADEON_DST_PITCH_OFFSET:
-+ case RADEON_SRC_PITCH_OFFSET:
-+ r = r100_reloc_pitch_offset(p, pkt, idx, reg);
-+ if (r)
-+ return r;
-+ break;
-+ case RADEON_RB3D_DEPTHOFFSET:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->zb.robj = reloc->robj;
-+ track->zb.offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ break;
-+ case RADEON_RB3D_COLOROFFSET:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->cb[0].robj = reloc->robj;
-+ track->cb[0].offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ break;
-+ case R200_PP_TXOFFSET_0:
-+ case R200_PP_TXOFFSET_1:
-+ case R200_PP_TXOFFSET_2:
-+ case R200_PP_TXOFFSET_3:
-+ case R200_PP_TXOFFSET_4:
-+ case R200_PP_TXOFFSET_5:
-+ i = (reg - R200_PP_TXOFFSET_0) / 24;
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ track->textures[i].robj = reloc->robj;
-+ break;
-+ case R200_PP_CUBIC_OFFSET_F1_0:
-+ case R200_PP_CUBIC_OFFSET_F2_0:
-+ case R200_PP_CUBIC_OFFSET_F3_0:
-+ case R200_PP_CUBIC_OFFSET_F4_0:
-+ case R200_PP_CUBIC_OFFSET_F5_0:
-+ case R200_PP_CUBIC_OFFSET_F1_1:
-+ case R200_PP_CUBIC_OFFSET_F2_1:
-+ case R200_PP_CUBIC_OFFSET_F3_1:
-+ case R200_PP_CUBIC_OFFSET_F4_1:
-+ case R200_PP_CUBIC_OFFSET_F5_1:
-+ case R200_PP_CUBIC_OFFSET_F1_2:
-+ case R200_PP_CUBIC_OFFSET_F2_2:
-+ case R200_PP_CUBIC_OFFSET_F3_2:
-+ case R200_PP_CUBIC_OFFSET_F4_2:
-+ case R200_PP_CUBIC_OFFSET_F5_2:
-+ case R200_PP_CUBIC_OFFSET_F1_3:
-+ case R200_PP_CUBIC_OFFSET_F2_3:
-+ case R200_PP_CUBIC_OFFSET_F3_3:
-+ case R200_PP_CUBIC_OFFSET_F4_3:
-+ case R200_PP_CUBIC_OFFSET_F5_3:
-+ case R200_PP_CUBIC_OFFSET_F1_4:
-+ case R200_PP_CUBIC_OFFSET_F2_4:
-+ case R200_PP_CUBIC_OFFSET_F3_4:
-+ case R200_PP_CUBIC_OFFSET_F4_4:
-+ case R200_PP_CUBIC_OFFSET_F5_4:
-+ case R200_PP_CUBIC_OFFSET_F1_5:
-+ case R200_PP_CUBIC_OFFSET_F2_5:
-+ case R200_PP_CUBIC_OFFSET_F3_5:
-+ case R200_PP_CUBIC_OFFSET_F4_5:
-+ case R200_PP_CUBIC_OFFSET_F5_5:
-+ i = (reg - R200_PP_TXOFFSET_0) / 24;
-+ face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx];
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ track->textures[i].cube_info[face - 1].robj = reloc->robj;
-+ break;
-+ case RADEON_RE_WIDTH_HEIGHT:
-+ track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
-+ break;
-+ case RADEON_RB3D_COLORPITCH:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+
-+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
-+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
-+
-+ tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
-+ tmp |= tile_flags;
-+ ib[idx] = tmp;
-+
-+ track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
-+ break;
-+ case RADEON_RB3D_DEPTHPITCH:
-+ track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
-+ break;
-+ case RADEON_RB3D_CNTL:
-+ switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
-+ case 7:
-+ case 8:
-+ case 9:
-+ case 11:
-+ case 12:
-+ track->cb[0].cpp = 1;
-+ break;
-+ case 3:
-+ case 4:
-+ case 15:
-+ track->cb[0].cpp = 2;
-+ break;
-+ case 6:
-+ track->cb[0].cpp = 4;
-+ break;
-+ default:
-+ DRM_ERROR("Invalid color buffer format (%d) !\n",
-+ ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
-+ return -EINVAL;
-+ }
-+ if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) {
-+ DRM_ERROR("No support for depth xy offset in kms\n");
-+ return -EINVAL;
-+ }
-+
-+ track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
-+ break;
-+ case RADEON_RB3D_ZSTENCILCNTL:
-+ switch (ib_chunk->kdata[idx] & 0xf) {
-+ case 0:
-+ track->zb.cpp = 2;
-+ break;
-+ case 2:
-+ case 3:
-+ case 4:
-+ case 5:
-+ case 9:
-+ case 11:
-+ track->zb.cpp = 4;
-+ break;
-+ default:
-+ break;
-+ }
-+ break;
-+ case RADEON_RB3D_ZPASS_ADDR:
-+ r = r100_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-+ idx, reg);
-+ r100_cs_dump_packet(p, pkt);
-+ return r;
-+ }
-+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
-+ break;
-+ case RADEON_PP_CNTL:
-+ {
-+ uint32_t temp = ib_chunk->kdata[idx] >> 4;
-+ for (i = 0; i < track->num_texture; i++)
-+ track->textures[i].enabled = !!(temp & (1 << i));
-+ }
-+ break;
-+ case RADEON_SE_VF_CNTL:
-+ track->vap_vf_cntl = ib_chunk->kdata[idx];
-+ break;
-+ case 0x210c:
-+ /* VAP_VF_MAX_VTX_INDX */
-+ track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
-+ break;
-+ case R200_SE_VTX_FMT_0:
-+ track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]);
-+ break;
-+ case R200_SE_VTX_FMT_1:
-+ track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]);
-+ break;
-+ case R200_PP_TXSIZE_0:
-+ case R200_PP_TXSIZE_1:
-+ case R200_PP_TXSIZE_2:
-+ case R200_PP_TXSIZE_3:
-+ case R200_PP_TXSIZE_4:
-+ case R200_PP_TXSIZE_5:
-+ i = (reg - R200_PP_TXSIZE_0) / 32;
-+ track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
-+ track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
-+ break;
-+ case R200_PP_TXPITCH_0:
-+ case R200_PP_TXPITCH_1:
-+ case R200_PP_TXPITCH_2:
-+ case R200_PP_TXPITCH_3:
-+ case R200_PP_TXPITCH_4:
-+ case R200_PP_TXPITCH_5:
-+ i = (reg - R200_PP_TXPITCH_0) / 32;
-+ track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
-+ break;
-+ case R200_PP_TXFILTER_0:
-+ case R200_PP_TXFILTER_1:
-+ case R200_PP_TXFILTER_2:
-+ case R200_PP_TXFILTER_3:
-+ case R200_PP_TXFILTER_4:
-+ case R200_PP_TXFILTER_5:
-+ i = (reg - R200_PP_TXFILTER_0) / 32;
-+ track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK)
-+ >> R200_MAX_MIP_LEVEL_SHIFT);
-+ tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
-+ if (tmp == 2 || tmp == 6)
-+ track->textures[i].roundup_w = false;
-+ tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
-+ if (tmp == 2 || tmp == 6)
-+ track->textures[i].roundup_h = false;
-+ break;
-+ case R200_PP_TXMULTI_CTL_0:
-+ case R200_PP_TXMULTI_CTL_1:
-+ case R200_PP_TXMULTI_CTL_2:
-+ case R200_PP_TXMULTI_CTL_3:
-+ case R200_PP_TXMULTI_CTL_4:
-+ case R200_PP_TXMULTI_CTL_5:
-+ i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
-+ break;
-+ case R200_PP_TXFORMAT_X_0:
-+ case R200_PP_TXFORMAT_X_1:
-+ case R200_PP_TXFORMAT_X_2:
-+ case R200_PP_TXFORMAT_X_3:
-+ case R200_PP_TXFORMAT_X_4:
-+ case R200_PP_TXFORMAT_X_5:
-+ i = (reg - R200_PP_TXFORMAT_X_0) / 32;
-+ track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7;
-+ tmp = (ib_chunk->kdata[idx] >> 16) & 0x3;
-+ /* 2D, 3D, CUBE */
-+ switch (tmp) {
-+ case 0:
-+ case 5:
-+ case 6:
-+ case 7:
-+ track->textures[i].tex_coord_type = 0;
-+ break;
-+ case 1:
-+ track->textures[i].tex_coord_type = 1;
-+ break;
-+ case 2:
-+ track->textures[i].tex_coord_type = 2;
-+ break;
-+ }
-+ break;
-+ case R200_PP_TXFORMAT_0:
-+ case R200_PP_TXFORMAT_1:
-+ case R200_PP_TXFORMAT_2:
-+ case R200_PP_TXFORMAT_3:
-+ case R200_PP_TXFORMAT_4:
-+ case R200_PP_TXFORMAT_5:
-+ i = (reg - R200_PP_TXFORMAT_0) / 32;
-+ if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) {
-+ track->textures[i].use_pitch = 1;
-+ } else {
-+ track->textures[i].use_pitch = 0;
-+ track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-+ track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
-+ }
-+ switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
-+ case R200_TXFORMAT_I8:
-+ case R200_TXFORMAT_RGB332:
-+ case R200_TXFORMAT_Y8:
-+ track->textures[i].cpp = 1;
-+ break;
-+ case R200_TXFORMAT_DXT1:
-+ case R200_TXFORMAT_AI88:
-+ case R200_TXFORMAT_ARGB1555:
-+ case R200_TXFORMAT_RGB565:
-+ case R200_TXFORMAT_ARGB4444:
-+ case R200_TXFORMAT_VYUY422:
-+ case R200_TXFORMAT_YVYU422:
-+ case R200_TXFORMAT_LDVDU655:
-+ case R200_TXFORMAT_DVDU88:
-+ case R200_TXFORMAT_AVYU4444:
-+ track->textures[i].cpp = 2;
-+ break;
-+ case R200_TXFORMAT_ARGB8888:
-+ case R200_TXFORMAT_RGBA8888:
-+ case R200_TXFORMAT_ABGR8888:
-+ case R200_TXFORMAT_BGR111110:
-+ case R200_TXFORMAT_LDVDU8888:
-+ case R200_TXFORMAT_DXT23:
-+ case R200_TXFORMAT_DXT45:
-+ track->textures[i].cpp = 4;
-+ break;
-+ }
-+ track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
-+ track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
-+ break;
-+ case R200_PP_CUBIC_FACES_0:
-+ case R200_PP_CUBIC_FACES_1:
-+ case R200_PP_CUBIC_FACES_2:
-+ case R200_PP_CUBIC_FACES_3:
-+ case R200_PP_CUBIC_FACES_4:
-+ case R200_PP_CUBIC_FACES_5:
-+ tmp = ib_chunk->kdata[idx];
-+ i = (reg - R200_PP_CUBIC_FACES_0) / 32;
-+ for (face = 0; face < 4; face++) {
-+ track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
-+ track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
-+ }
-+ break;
-+ default:
-+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
-+ reg, idx);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+int r200_init(struct radeon_device *rdev)
-+{
-+ rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
-+ rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
-+ return 0;
-+}
-diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
-index 051bca6..a5f82f7 100644
---- a/drivers/gpu/drm/radeon/r300.c
-+++ b/drivers/gpu/drm/radeon/r300.c
-@@ -32,6 +32,10 @@
- #include "radeon.h"
- #include "radeon_drm.h"
- #include "radeon_share.h"
-+#include "r100_track.h"
-+#include "r300d.h"
-+
-+#include "r300_reg_safe.h"
-
- /* r300,r350,rv350,rv370,rv380 depends on : */
- void r100_hdp_reset(struct radeon_device *rdev);
-@@ -47,14 +51,10 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx);
- int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
--int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
-- struct radeon_cs_reloc **cs_reloc);
- int r100_cs_parse_packet0(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- const unsigned *auth, unsigned n,
- radeon_packet0_check_t check);
--void r100_cs_dump_packet(struct radeon_cs_parser *p,
-- struct radeon_cs_packet *pkt);
- int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- struct radeon_object *robj);
-@@ -128,7 +128,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
- WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
- rv370_pcie_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
-- rdev->mc.gtt_size >> 20, table_addr);
-+ (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
- rdev->gart.ready = true;
- return 0;
- }
-@@ -704,307 +704,13 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
- /*
- * CS functions
- */
--struct r300_cs_track_cb {
-- struct radeon_object *robj;
-- unsigned pitch;
-- unsigned cpp;
-- unsigned offset;
--};
--
--struct r300_cs_track_array {
-- struct radeon_object *robj;
-- unsigned esize;
--};
--
--struct r300_cs_track_texture {
-- struct radeon_object *robj;
-- unsigned pitch;
-- unsigned width;
-- unsigned height;
-- unsigned num_levels;
-- unsigned cpp;
-- unsigned tex_coord_type;
-- unsigned txdepth;
-- unsigned width_11;
-- unsigned height_11;
-- bool use_pitch;
-- bool enabled;
-- bool roundup_w;
-- bool roundup_h;
--};
--
--struct r300_cs_track {
-- unsigned num_cb;
-- unsigned maxy;
-- unsigned vtx_size;
-- unsigned vap_vf_cntl;
-- unsigned immd_dwords;
-- unsigned num_arrays;
-- unsigned max_indx;
-- struct r300_cs_track_array arrays[11];
-- struct r300_cs_track_cb cb[4];
-- struct r300_cs_track_cb zb;
-- struct r300_cs_track_texture textures[16];
-- bool z_enabled;
--};
--
--static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
--{
-- DRM_ERROR("pitch %d\n", t->pitch);
-- DRM_ERROR("width %d\n", t->width);
-- DRM_ERROR("height %d\n", t->height);
-- DRM_ERROR("num levels %d\n", t->num_levels);
-- DRM_ERROR("depth %d\n", t->txdepth);
-- DRM_ERROR("bpp %d\n", t->cpp);
-- DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
-- DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
-- DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
--}
--
--static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
-- struct r300_cs_track *track)
--{
-- struct radeon_object *robj;
-- unsigned long size;
-- unsigned u, i, w, h;
--
-- for (u = 0; u < 16; u++) {
-- if (!track->textures[u].enabled)
-- continue;
-- robj = track->textures[u].robj;
-- if (robj == NULL) {
-- DRM_ERROR("No texture bound to unit %u\n", u);
-- return -EINVAL;
-- }
-- size = 0;
-- for (i = 0; i <= track->textures[u].num_levels; i++) {
-- if (track->textures[u].use_pitch) {
-- w = track->textures[u].pitch / (1 << i);
-- } else {
-- w = track->textures[u].width / (1 << i);
-- if (rdev->family >= CHIP_RV515)
-- w |= track->textures[u].width_11;
-- if (track->textures[u].roundup_w)
-- w = roundup_pow_of_two(w);
-- }
-- h = track->textures[u].height / (1 << i);
-- if (rdev->family >= CHIP_RV515)
-- h |= track->textures[u].height_11;
-- if (track->textures[u].roundup_h)
-- h = roundup_pow_of_two(h);
-- size += w * h;
-- }
-- size *= track->textures[u].cpp;
-- switch (track->textures[u].tex_coord_type) {
-- case 0:
-- break;
-- case 1:
-- size *= (1 << track->textures[u].txdepth);
-- break;
-- case 2:
-- size *= 6;
-- break;
-- default:
-- DRM_ERROR("Invalid texture coordinate type %u for unit "
-- "%u\n", track->textures[u].tex_coord_type, u);
-- return -EINVAL;
-- }
-- if (size > radeon_object_size(robj)) {
-- DRM_ERROR("Texture of unit %u needs %lu bytes but is "
-- "%lu\n", u, size, radeon_object_size(robj));
-- r300_cs_track_texture_print(&track->textures[u]);
-- return -EINVAL;
-- }
-- }
-- return 0;
--}
--
--int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
--{
-- unsigned i;
-- unsigned long size;
-- unsigned prim_walk;
-- unsigned nverts;
--
-- for (i = 0; i < track->num_cb; i++) {
-- if (track->cb[i].robj == NULL) {
-- DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
-- return -EINVAL;
-- }
-- size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
-- size += track->cb[i].offset;
-- if (size > radeon_object_size(track->cb[i].robj)) {
-- DRM_ERROR("[drm] Buffer too small for color buffer %d "
-- "(need %lu have %lu) !\n", i, size,
-- radeon_object_size(track->cb[i].robj));
-- DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
-- i, track->cb[i].pitch, track->cb[i].cpp,
-- track->cb[i].offset, track->maxy);
-- return -EINVAL;
-- }
-- }
-- if (track->z_enabled) {
-- if (track->zb.robj == NULL) {
-- DRM_ERROR("[drm] No buffer for z buffer !\n");
-- return -EINVAL;
-- }
-- size = track->zb.pitch * track->zb.cpp * track->maxy;
-- size += track->zb.offset;
-- if (size > radeon_object_size(track->zb.robj)) {
-- DRM_ERROR("[drm] Buffer too small for z buffer "
-- "(need %lu have %lu) !\n", size,
-- radeon_object_size(track->zb.robj));
-- return -EINVAL;
-- }
-- }
-- prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
-- nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
-- switch (prim_walk) {
-- case 1:
-- for (i = 0; i < track->num_arrays; i++) {
-- size = track->arrays[i].esize * track->max_indx * 4;
-- if (track->arrays[i].robj == NULL) {
-- DRM_ERROR("(PW %u) Vertex array %u no buffer "
-- "bound\n", prim_walk, i);
-- return -EINVAL;
-- }
-- if (size > radeon_object_size(track->arrays[i].robj)) {
-- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-- "have %lu dwords\n", prim_walk, i,
-- size >> 2,
-- radeon_object_size(track->arrays[i].robj) >> 2);
-- DRM_ERROR("Max indices %u\n", track->max_indx);
-- return -EINVAL;
-- }
-- }
-- break;
-- case 2:
-- for (i = 0; i < track->num_arrays; i++) {
-- size = track->arrays[i].esize * (nverts - 1) * 4;
-- if (track->arrays[i].robj == NULL) {
-- DRM_ERROR("(PW %u) Vertex array %u no buffer "
-- "bound\n", prim_walk, i);
-- return -EINVAL;
-- }
-- if (size > radeon_object_size(track->arrays[i].robj)) {
-- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-- "have %lu dwords\n", prim_walk, i, size >> 2,
-- radeon_object_size(track->arrays[i].robj) >> 2);
-- return -EINVAL;
-- }
-- }
-- break;
-- case 3:
-- size = track->vtx_size * nverts;
-- if (size != track->immd_dwords) {
-- DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
-- track->immd_dwords, size);
-- DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
-- nverts, track->vtx_size);
-- return -EINVAL;
-- }
-- break;
-- default:
-- DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
-- prim_walk);
-- return -EINVAL;
-- }
-- return r300_cs_track_texture_check(rdev, track);
--}
--
--static inline void r300_cs_track_clear(struct r300_cs_track *track)
--{
-- unsigned i;
--
-- track->num_cb = 4;
-- track->maxy = 4096;
-- for (i = 0; i < track->num_cb; i++) {
-- track->cb[i].robj = NULL;
-- track->cb[i].pitch = 8192;
-- track->cb[i].cpp = 16;
-- track->cb[i].offset = 0;
-- }
-- track->z_enabled = true;
-- track->zb.robj = NULL;
-- track->zb.pitch = 8192;
-- track->zb.cpp = 4;
-- track->zb.offset = 0;
-- track->vtx_size = 0x7F;
-- track->immd_dwords = 0xFFFFFFFFUL;
-- track->num_arrays = 11;
-- track->max_indx = 0x00FFFFFFUL;
-- for (i = 0; i < track->num_arrays; i++) {
-- track->arrays[i].robj = NULL;
-- track->arrays[i].esize = 0x7F;
-- }
-- for (i = 0; i < 16; i++) {
-- track->textures[i].pitch = 16536;
-- track->textures[i].width = 16536;
-- track->textures[i].height = 16536;
-- track->textures[i].width_11 = 1 << 11;
-- track->textures[i].height_11 = 1 << 11;
-- track->textures[i].num_levels = 12;
-- track->textures[i].txdepth = 16;
-- track->textures[i].cpp = 64;
-- track->textures[i].tex_coord_type = 1;
-- track->textures[i].robj = NULL;
-- /* CS IB emission code makes sure texture units are disabled */
-- track->textures[i].enabled = false;
-- track->textures[i].roundup_w = true;
-- track->textures[i].roundup_h = true;
-- }
--}
--
--static const unsigned r300_reg_safe_bm[159] = {
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
-- 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
-- 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
-- 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
-- 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
-- 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
-- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-- 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
-- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-- 0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
--};
--
- static int r300_packet0_check(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx, unsigned reg)
- {
- struct radeon_cs_chunk *ib_chunk;
- struct radeon_cs_reloc *reloc;
-- struct r300_cs_track *track;
-+ struct r100_cs_track *track;
- volatile uint32_t *ib;
- uint32_t tmp, tile_flags = 0;
- unsigned i;
-@@ -1012,7 +718,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
-
- ib = p->ib->ptr;
- ib_chunk = &p->chunks[p->chunk_ib_idx];
-- track = (struct r300_cs_track*)p->track;
-+ track = (struct r100_cs_track *)p->track;
- switch(reg) {
- case AVIVO_D1MODE_VLINE_START_END:
- case RADEON_CRTC_GUI_TRIG_VLINE:
-@@ -1026,28 +732,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
- break;
- case RADEON_DST_PITCH_OFFSET:
- case RADEON_SRC_PITCH_OFFSET:
-- r = r100_cs_packet_next_reloc(p, &reloc);
-- if (r) {
-- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-- idx, reg);
-- r100_cs_dump_packet(p, pkt);
-+ r = r100_reloc_pitch_offset(p, pkt, idx, reg);
-+ if (r)
- return r;
-- }
-- tmp = ib_chunk->kdata[idx] & 0x003fffff;
-- tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
--
-- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-- tile_flags |= RADEON_DST_TILE_MACRO;
-- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-- if (reg == RADEON_SRC_PITCH_OFFSET) {
-- DRM_ERROR("Cannot src blit from microtiled surface\n");
-- r100_cs_dump_packet(p, pkt);
-- return -EINVAL;
-- }
-- tile_flags |= RADEON_DST_TILE_MICRO;
-- }
-- tmp |= tile_flags;
-- ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
- break;
- case R300_RB3D_COLOROFFSET0:
- case R300_RB3D_COLOROFFSET1:
-@@ -1256,42 +943,41 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
- tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
- track->textures[i].tex_coord_type = tmp;
- switch ((ib_chunk->kdata[idx] & 0x1F)) {
-- case 0:
-- case 2:
-- case 5:
-- case 18:
-- case 20:
-- case 21:
-+ case R300_TX_FORMAT_X8:
-+ case R300_TX_FORMAT_Y4X4:
-+ case R300_TX_FORMAT_Z3Y3X2:
- track->textures[i].cpp = 1;
- break;
-- case 1:
-- case 3:
-- case 6:
-- case 7:
-- case 10:
-- case 11:
-- case 19:
-- case 22:
-- case 24:
-+ case R300_TX_FORMAT_X16:
-+ case R300_TX_FORMAT_Y8X8:
-+ case R300_TX_FORMAT_Z5Y6X5:
-+ case R300_TX_FORMAT_Z6Y5X5:
-+ case R300_TX_FORMAT_W4Z4Y4X4:
-+ case R300_TX_FORMAT_W1Z5Y5X5:
-+ case R300_TX_FORMAT_DXT1:
-+ case R300_TX_FORMAT_D3DMFT_CxV8U8:
-+ case R300_TX_FORMAT_B8G8_B8G8:
-+ case R300_TX_FORMAT_G8R8_G8B8:
- track->textures[i].cpp = 2;
- break;
-- case 4:
-- case 8:
-- case 9:
-- case 12:
-- case 13:
-- case 23:
-- case 25:
-- case 27:
-- case 30:
-+ case R300_TX_FORMAT_Y16X16:
-+ case R300_TX_FORMAT_Z11Y11X10:
-+ case R300_TX_FORMAT_Z10Y11X11:
-+ case R300_TX_FORMAT_W8Z8Y8X8:
-+ case R300_TX_FORMAT_W2Z10Y10X10:
-+ case 0x17:
-+ case R300_TX_FORMAT_FL_I32:
-+ case 0x1e:
-+ case R300_TX_FORMAT_DXT3:
-+ case R300_TX_FORMAT_DXT5:
- track->textures[i].cpp = 4;
- break;
-- case 14:
-- case 26:
-- case 28:
-+ case R300_TX_FORMAT_W16Z16Y16X16:
-+ case R300_TX_FORMAT_FL_R16G16B16A16:
-+ case R300_TX_FORMAT_FL_I32A32:
- track->textures[i].cpp = 8;
- break;
-- case 29:
-+ case R300_TX_FORMAT_FL_R32G32B32A32:
- track->textures[i].cpp = 16;
- break;
- default:
-@@ -1319,11 +1005,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
- case 0x443C:
- /* TX_FILTER0_[0-15] */
- i = (reg - 0x4400) >> 2;
-- tmp = ib_chunk->kdata[idx] & 0x7;;
-+ tmp = ib_chunk->kdata[idx] & 0x7;
- if (tmp == 2 || tmp == 4 || tmp == 6) {
- track->textures[i].roundup_w = false;
- }
-- tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;;
-+ tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
- if (tmp == 2 || tmp == 4 || tmp == 6) {
- track->textures[i].roundup_h = false;
- }
-@@ -1411,8 +1097,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt)
- {
- struct radeon_cs_chunk *ib_chunk;
-+
- struct radeon_cs_reloc *reloc;
-- struct r300_cs_track *track;
-+ struct r100_cs_track *track;
- volatile uint32_t *ib;
- unsigned idx;
- unsigned i, c;
-@@ -1421,7 +1108,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
- ib = p->ib->ptr;
- ib_chunk = &p->chunks[p->chunk_ib_idx];
- idx = pkt->idx + 1;
-- track = (struct r300_cs_track*)p->track;
-+ track = (struct r100_cs_track *)p->track;
- switch(pkt->opcode) {
- case PACKET3_3D_LOAD_VBPNTR:
- c = ib_chunk->kdata[idx++] & 0x1F;
-@@ -1488,7 +1175,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
- }
- track->vap_vf_cntl = ib_chunk->kdata[idx+1];
- track->immd_dwords = pkt->count - 1;
-- r = r300_cs_track_check(p->rdev, track);
-+ r = r100_cs_track_check(p->rdev, track);
- if (r) {
- return r;
- }
-@@ -1503,35 +1190,35 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
- }
- track->vap_vf_cntl = ib_chunk->kdata[idx];
- track->immd_dwords = pkt->count;
-- r = r300_cs_track_check(p->rdev, track);
-+ r = r100_cs_track_check(p->rdev, track);
- if (r) {
- return r;
- }
- break;
- case PACKET3_3D_DRAW_VBUF:
- track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
-- r = r300_cs_track_check(p->rdev, track);
-+ r = r100_cs_track_check(p->rdev, track);
- if (r) {
- return r;
- }
- break;
- case PACKET3_3D_DRAW_VBUF_2:
- track->vap_vf_cntl = ib_chunk->kdata[idx];
-- r = r300_cs_track_check(p->rdev, track);
-+ r = r100_cs_track_check(p->rdev, track);
- if (r) {
- return r;
- }
- break;
- case PACKET3_3D_DRAW_INDX:
- track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
-- r = r300_cs_track_check(p->rdev, track);
-+ r = r100_cs_track_check(p->rdev, track);
- if (r) {
- return r;
- }
- break;
- case PACKET3_3D_DRAW_INDX_2:
- track->vap_vf_cntl = ib_chunk->kdata[idx];
-- r = r300_cs_track_check(p->rdev, track);
-+ r = r100_cs_track_check(p->rdev, track);
- if (r) {
- return r;
- }
-@@ -1548,10 +1235,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
- int r300_cs_parse(struct radeon_cs_parser *p)
- {
- struct radeon_cs_packet pkt;
-- struct r300_cs_track track;
-+ struct r100_cs_track track;
- int r;
-
-- r300_cs_track_clear(&track);
-+ r100_cs_track_clear(p->rdev, &track);
- p->track = &track;
- do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
-diff --git a/drivers/gpu/drm/radeon/r300.h b/drivers/gpu/drm/radeon/r300.h
-deleted file mode 100644
-index 8486b4d..0000000
---- a/drivers/gpu/drm/radeon/r300.h
-+++ /dev/null
-@@ -1,36 +0,0 @@
--/*
-- * Copyright 2008 Advanced Micro Devices, Inc.
-- * Copyright 2008 Red Hat Inc.
-- * Copyright 2009 Jerome Glisse.
-- *
-- * Permission is hereby granted, free of charge, to any person obtaining a
-- * copy of this software and associated documentation files (the "Software"),
-- * to deal in the Software without restriction, including without limitation
-- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- * and/or sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following conditions:
-- *
-- * The above copyright notice and this permission notice shall be included in
-- * all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
-- *
-- * Authors: Dave Airlie
-- * Alex Deucher
-- * Jerome Glisse
-- */
--#ifndef R300_H
--#define R300_H
--
--struct r300_asic {
-- const unsigned *reg_safe_bm;
-- unsigned reg_safe_bm_size;
--};
--
--#endif
-diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
-new file mode 100644
-index 0000000..63ec076
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r300d.h
-@@ -0,0 +1,76 @@
-+/*
-+ * Copyright 2008 Advanced Micro Devices, Inc.
-+ * Copyright 2008 Red Hat Inc.
-+ * Copyright 2009 Jerome Glisse.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#ifndef __R300D_H__
-+#define __R300D_H__
-+
-+#define CP_PACKET0 0x00000000
-+#define PACKET0_BASE_INDEX_SHIFT 0
-+#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
-+#define PACKET0_COUNT_SHIFT 16
-+#define PACKET0_COUNT_MASK (0x3fff << 16)
-+#define CP_PACKET1 0x40000000
-+#define CP_PACKET2 0x80000000
-+#define PACKET2_PAD_SHIFT 0
-+#define PACKET2_PAD_MASK (0x3fffffff << 0)
-+#define CP_PACKET3 0xC0000000
-+#define PACKET3_IT_OPCODE_SHIFT 8
-+#define PACKET3_IT_OPCODE_MASK (0xff << 8)
-+#define PACKET3_COUNT_SHIFT 16
-+#define PACKET3_COUNT_MASK (0x3fff << 16)
-+/* PACKET3 op code */
-+#define PACKET3_NOP 0x10
-+#define PACKET3_3D_DRAW_VBUF 0x28
-+#define PACKET3_3D_DRAW_IMMD 0x29
-+#define PACKET3_3D_DRAW_INDX 0x2A
-+#define PACKET3_3D_LOAD_VBPNTR 0x2F
-+#define PACKET3_INDX_BUFFER 0x33
-+#define PACKET3_3D_DRAW_VBUF_2 0x34
-+#define PACKET3_3D_DRAW_IMMD_2 0x35
-+#define PACKET3_3D_DRAW_INDX_2 0x36
-+#define PACKET3_BITBLT_MULTI 0x9B
-+
-+#define PACKET0(reg, n) (CP_PACKET0 | \
-+ REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
-+ REG_SET(PACKET0_COUNT, (n)))
-+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-+#define PACKET3(op, n) (CP_PACKET3 | \
-+ REG_SET(PACKET3_IT_OPCODE, (op)) | \
-+ REG_SET(PACKET3_COUNT, (n)))
-+
-+#define PACKET_TYPE0 0
-+#define PACKET_TYPE1 1
-+#define PACKET_TYPE2 2
-+#define PACKET_TYPE3 3
-+
-+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-+
-+#endif
-diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
-index 538cd90..d8fcef4 100644
---- a/drivers/gpu/drm/radeon/r600.c
-+++ b/drivers/gpu/drm/radeon/r600.c
-@@ -25,12 +25,27 @@
- * Alex Deucher
- * Jerome Glisse
- */
-+#include <linux/seq_file.h>
-+#include <linux/firmware.h>
-+#include <linux/platform_device.h>
- #include "drmP.h"
--#include "radeon_reg.h"
-+#include "radeon_drm.h"
- #include "radeon.h"
-+#include "radeon_mode.h"
-+#include "radeon_share.h"
-+#include "r600d.h"
-+#include "avivod.h"
-+#include "atom.h"
-
--/* r600,rv610,rv630,rv620,rv635,rv670 depends on : */
--void rs600_mc_disable_clients(struct radeon_device *rdev);
-+#define PFP_UCODE_SIZE 576
-+#define PM4_UCODE_SIZE 1792
-+#define R700_PFP_UCODE_SIZE 848
-+#define R700_PM4_UCODE_SIZE 1360
-+
-+/* Firmware Names */
-+/*(DEBLOBBED)*/
-+
-+int r600_debugfs_mc_info_init(struct radeon_device *rdev);
-
- /* This files gather functions specifics to:
- * r600,rv610,rv630,rv620,rv635,rv670
-@@ -39,87 +73,270 @@ void rs600_mc_disable_clients(struct radeon_device *rdev);
- */
- int r600_mc_wait_for_idle(struct radeon_device *rdev);
- void r600_gpu_init(struct radeon_device *rdev);
-+void r600_fini(struct radeon_device *rdev);
-
-
- /*
-- * MC
-+ * R600 PCIE GART
- */
--int r600_mc_init(struct radeon_device *rdev)
-+int r600_gart_clear_page(struct radeon_device *rdev, int i)
- {
-- uint32_t tmp;
-+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
-+ u64 pte;
-
-- r600_gpu_init(rdev);
-+ if (i < 0 || i > rdev->gart.num_gpu_pages)
-+ return -EINVAL;
-+ pte = 0;
-+ writeq(pte, ((void __iomem *)ptr) + (i * 8));
-+ return 0;
-+}
-
-- /* setup the gart before changing location so we can ask to
-- * discard unmapped mc request
-- */
-- /* FIXME: disable out of gart access */
-- tmp = rdev->mc.gtt_location / 4096;
-- tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
-- WREG32(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
-- tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
-- tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
-- WREG32(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
--
-- rs600_mc_disable_clients(rdev);
-- if (r600_mc_wait_for_idle(rdev)) {
-- printk(KERN_WARNING "Failed to wait MC idle while "
-- "programming pipes. Bad things might happen.\n");
-+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
-+{
-+ unsigned i;
-+ u32 tmp;
-+
-+ WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
-+ WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
-+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
-+ for (i = 0; i < rdev->usec_timeout; i++) {
-+ /* read MC_STATUS */
-+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
-+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
-+ if (tmp == 2) {
-+ printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
-+ return;
-+ }
-+ if (tmp) {
-+ return;
-+ }
-+ udelay(1);
-+ }
-+}
-+
-+int r600_pcie_gart_enable(struct radeon_device *rdev)
-+{
-+ u32 tmp;
-+ int r, i;
-+
-+ /* Initialize common gart structure */
-+ r = radeon_gart_init(rdev);
-+ if (r) {
-+ return r;
-+ }
-+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
-+ r = radeon_gart_table_vram_alloc(rdev);
-+ if (r) {
-+ return r;
- }
-+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
-+ r600_gart_clear_page(rdev, i);
-+ /* Setup L2 cache */
-+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
-+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
-+ EFFECTIVE_L2_QUEUE_SIZE(7));
-+ WREG32(VM_L2_CNTL2, 0);
-+ WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
-+ /* Setup TLB control */
-+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
-+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
-+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
-+ ENABLE_WAIT_L2_QUERY;
-+ WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
-+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
-+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
-+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
-+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
-+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
-+ (u32)(rdev->dummy_page.addr >> 12));
-+ for (i = 1; i < 7; i++)
-+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
-
-- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-- tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
-- tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
-- WREG32(R600_MC_VM_FB_LOCATION, tmp);
-- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-- tmp = REG_SET(R600_MC_AGP_TOP, tmp >> 22);
-- WREG32(R600_MC_VM_AGP_TOP, tmp);
-- tmp = REG_SET(R600_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
-- WREG32(R600_MC_VM_AGP_BOT, tmp);
-+ r600_pcie_gart_tlb_flush(rdev);
-+ rdev->gart.ready = true;
- return 0;
- }
-
--void r600_mc_fini(struct radeon_device *rdev)
-+void r600_pcie_gart_disable(struct radeon_device *rdev)
- {
-- /* FIXME: implement */
--}
-+ u32 tmp;
-+ int i;
-
-+ /* Clear ptes */
-+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
-+ r600_gart_clear_page(rdev, i);
-+ r600_pcie_gart_tlb_flush(rdev);
-+ /* Disable all tables */
-+ for (i = 0; i < 7; i++)
-+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
-
--/*
-- * Global GPU functions
-- */
--void r600_errata(struct radeon_device *rdev)
--{
-- rdev->pll_errata = 0;
-+ /* Disable L2 cache */
-+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
-+ EFFECTIVE_L2_QUEUE_SIZE(7));
-+ WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
-+ /* Setup L1 TLB control */
-+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
-+ ENABLE_WAIT_L2_QUERY;
-+ WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
-+ WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
- }
-
- int r600_mc_wait_for_idle(struct radeon_device *rdev)
- {
-- /* FIXME: implement */
-- return 0;
-+ unsigned i;
-+ u32 tmp;
-+
-+ for (i = 0; i < rdev->usec_timeout; i++) {
-+ /* read MC_STATUS */
-+ tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
-+ if (!tmp)
-+ return 0;
-+ udelay(1);
-+ }
-+ return -1;
- }
-
--void r600_gpu_init(struct radeon_device *rdev)
-+static void r600_mc_resume(struct radeon_device *rdev)
- {
-- /* FIXME: implement */
--}
-+ u32 d1vga_control, d2vga_control;
-+ u32 vga_render_control, vga_hdp_control;
-+ u32 d1crtc_control, d2crtc_control;
-+ u32 new_d1grph_primary, new_d1grph_secondary;
-+ u32 new_d2grph_primary, new_d2grph_secondary;
-+ u64 old_vram_start;
-+ u32 tmp;
-+ int i, j;
-
-+ /* Initialize HDP */
-+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
-+ WREG32((0x2c14 + j), 0x00000000);
-+ WREG32((0x2c18 + j), 0x00000000);
-+ WREG32((0x2c1c + j), 0x00000000);
-+ WREG32((0x2c20 + j), 0x00000000);
-+ WREG32((0x2c24 + j), 0x00000000);
-+ }
-+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
-
--/*
-- * VRAM info
-- */
--void r600_vram_get_type(struct radeon_device *rdev)
-+ d1vga_control = RREG32(D1VGA_CONTROL);
-+ d2vga_control = RREG32(D2VGA_CONTROL);
-+ vga_render_control = RREG32(VGA_RENDER_CONTROL);
-+ vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-+ d1crtc_control = RREG32(D1CRTC_CONTROL);
-+ d2crtc_control = RREG32(D2CRTC_CONTROL);
-+ old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
-+ new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
-+ new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
-+ new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
-+ new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
-+ new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
-+ new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
-+ new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
-+ new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-+
-+ /* Stop all video */
-+ WREG32(D1VGA_CONTROL, 0);
-+ WREG32(D2VGA_CONTROL, 0);
-+ WREG32(VGA_RENDER_CONTROL, 0);
-+ WREG32(D1CRTC_UPDATE_LOCK, 1);
-+ WREG32(D2CRTC_UPDATE_LOCK, 1);
-+ WREG32(D1CRTC_CONTROL, 0);
-+ WREG32(D2CRTC_CONTROL, 0);
-+ WREG32(D1CRTC_UPDATE_LOCK, 0);
-+ WREG32(D2CRTC_UPDATE_LOCK, 0);
-+
-+ mdelay(1);
-+ if (r600_mc_wait_for_idle(rdev)) {
-+ printk(KERN_WARNING "[drm] MC not idle !\n");
-+ }
-+
-+ /* Lock out access through VGA aperture */
-+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-+
-+ /* Update configuration */
-+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
-+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
-+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
-+ tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
-+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
-+ WREG32(MC_VM_FB_LOCATION, tmp);
-+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
-+ WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-+ WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
-+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
-+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
-+ } else {
-+ WREG32(MC_VM_AGP_BASE, 0);
-+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
-+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
-+ }
-+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
-+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
-+ WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
-+ WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
-+ WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-+
-+ /* Unlock host access */
-+ WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-+
-+ mdelay(1);
-+ if (r600_mc_wait_for_idle(rdev)) {
-+ printk(KERN_WARNING "[drm] MC not idle !\n");
-+ }
-+
-+ /* Restore video state */
-+ WREG32(D1CRTC_UPDATE_LOCK, 1);
-+ WREG32(D2CRTC_UPDATE_LOCK, 1);
-+ WREG32(D1CRTC_CONTROL, d1crtc_control);
-+ WREG32(D2CRTC_CONTROL, d2crtc_control);
-+ WREG32(D1CRTC_UPDATE_LOCK, 0);
-+ WREG32(D2CRTC_UPDATE_LOCK, 0);
-+ WREG32(D1VGA_CONTROL, d1vga_control);
-+ WREG32(D2VGA_CONTROL, d2vga_control);
-+ WREG32(VGA_RENDER_CONTROL, vga_render_control);
-+}
-+
-+int r600_mc_init(struct radeon_device *rdev)
- {
-- uint32_t tmp;
-+ fixed20_12 a;
-+ u32 tmp;
- int chansize;
-+ int r;
-
-+ /* Get VRAM information */
- rdev->mc.vram_width = 128;
- rdev->mc.vram_is_ddr = true;
--
-- tmp = RREG32(R600_RAMCFG);
-- if (tmp & R600_CHANSIZE_OVERRIDE) {
-+ tmp = RREG32(RAMCFG);
-+ if (tmp & CHANSIZE_OVERRIDE) {
- chansize = 16;
-- } else if (tmp & R600_CHANSIZE) {
-+ } else if (tmp & CHANSIZE_MASK) {
- chansize = 64;
- } else {
- chansize = 32;
-@@ -135,36 +352,1391 @@ void r600_vram_get_type(struct radeon_device *rdev)
- (rdev->family == CHIP_RV635)) {
- rdev->mc.vram_width = 2 * chansize;
- }
-+ /* Could aper size report 0 ? */
-+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-+ /* Setup GPU memory space */
-+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
-+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ r = radeon_agp_init(rdev);
-+ if (r)
-+ return r;
-+ /* gtt_size is setup by radeon_agp_init */
-+ rdev->mc.gtt_location = rdev->mc.agp_base;
-+ tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
-+ /* Try to put vram before or after AGP because we
-+ * want SYSTEM_APERTURE to cover both VRAM and
-+ * AGP so that GPU can catch out of VRAM/AGP access
-+ */
-+ if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
-+ /* Enough place before */
-+ rdev->mc.vram_location = rdev->mc.gtt_location -
-+ rdev->mc.mc_vram_size;
-+ } else if (tmp > rdev->mc.mc_vram_size) {
-+ /* Enough place after */
-+ rdev->mc.vram_location = rdev->mc.gtt_location +
-+ rdev->mc.gtt_size;
-+ } else {
-+ /* Try to setup VRAM then AGP, which might not
-+ * work on some cards
-+ */
-+ rdev->mc.vram_location = 0x00000000UL;
-+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-+ }
-+ } else {
-+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
-+ rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
-+ 0xFFFF) << 24;
-+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-+ tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-+ if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
-+ /* Enough place after vram */
-+ rdev->mc.gtt_location = tmp;
-+ } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
-+ /* Enough place before vram */
-+ rdev->mc.gtt_location = 0;
-+ } else {
-+ /* Not enough place after or before, shrink
-+ * gart size
-+ */
-+ if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
-+ rdev->mc.gtt_location = 0;
-+ rdev->mc.gtt_size = rdev->mc.vram_location;
-+ } else {
-+ rdev->mc.gtt_location = tmp;
-+ rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
-+ }
-+ }
-+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-+ } else {
-+ rdev->mc.vram_location = 0x00000000UL;
-+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-+ }
-+ }
-+ rdev->mc.vram_start = rdev->mc.vram_location;
-+ rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-+ rdev->mc.gtt_start = rdev->mc.gtt_location;
-+ rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
-+ /* FIXME: we should enforce default clock in case GPU is not in
-+ * default setup
-+ */
-+ a.full = rfixed_const(100);
-+ rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-+ rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-+ return 0;
- }
-
--void r600_vram_info(struct radeon_device *rdev)
-+/* We don't check whether the GPU really needs a reset; we simply do the
-+ * reset, and it's up to the caller to determine if the GPU needs one. We
-+ * might add a helper function to check that.
-+ */
-+int r600_gpu_soft_reset(struct radeon_device *rdev)
- {
-- r600_vram_get_type(rdev);
-- rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE);
-- rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-+ u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
-+ S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
-+ S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
-+ S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
-+ S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
-+ S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
-+ S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
-+ S_008010_GUI_ACTIVE(1);
-+ u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
-+ S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
-+ S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
-+ S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
-+ S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
-+ S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
-+ S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
-+ S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
-+ u32 srbm_reset = 0;
-
-- /* Could aper size report 0 ? */
-- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-+ /* Disable CP parsing/prefetching */
-+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
-+ /* Check if any of the rendering block is busy and reset it */
-+ if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
-+ (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
-+ WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) |
-+ S_008020_SOFT_RESET_DB(1) |
-+ S_008020_SOFT_RESET_CB(1) |
-+ S_008020_SOFT_RESET_PA(1) |
-+ S_008020_SOFT_RESET_SC(1) |
-+ S_008020_SOFT_RESET_SMX(1) |
-+ S_008020_SOFT_RESET_SPI(1) |
-+ S_008020_SOFT_RESET_SX(1) |
-+ S_008020_SOFT_RESET_SH(1) |
-+ S_008020_SOFT_RESET_TC(1) |
-+ S_008020_SOFT_RESET_TA(1) |
-+ S_008020_SOFT_RESET_VC(1) |
-+ S_008020_SOFT_RESET_VGT(1));
-+ (void)RREG32(R_008020_GRBM_SOFT_RESET);
-+ udelay(50);
-+ WREG32(R_008020_GRBM_SOFT_RESET, 0);
-+ (void)RREG32(R_008020_GRBM_SOFT_RESET);
-+ }
-+ /* Reset CP (we always reset CP) */
-+ WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1));
-+ (void)RREG32(R_008020_GRBM_SOFT_RESET);
-+ udelay(50);
-+ WREG32(R_008020_GRBM_SOFT_RESET, 0);
-+ (void)RREG32(R_008020_GRBM_SOFT_RESET);
-+ /* Reset other GPU blocks if necessary */
-+ if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-+ if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
-+ if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_IH(1);
-+ if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
-+ if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-+ if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-+ if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-+ if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-+ if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_MC(1);
-+ if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
-+ if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
-+ srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
-+ WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
-+ (void)RREG32(R_000E60_SRBM_SOFT_RESET);
-+ udelay(50);
-+ WREG32(R_000E60_SRBM_SOFT_RESET, 0);
-+ (void)RREG32(R_000E60_SRBM_SOFT_RESET);
-+ /* Wait a little for things to settle down */
-+ udelay(50);
-+ return 0;
-+}
-+
-+int r600_gpu_reset(struct radeon_device *rdev)
-+{
-+ return r600_gpu_soft_reset(rdev);
-+}
-+
-+static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
-+ u32 num_backends,
-+ u32 backend_disable_mask)
-+{
-+ u32 backend_map = 0;
-+ u32 enabled_backends_mask;
-+ u32 enabled_backends_count;
-+ u32 cur_pipe;
-+ u32 swizzle_pipe[R6XX_MAX_PIPES];
-+ u32 cur_backend;
-+ u32 i;
-+
-+ if (num_tile_pipes > R6XX_MAX_PIPES)
-+ num_tile_pipes = R6XX_MAX_PIPES;
-+ if (num_tile_pipes < 1)
-+ num_tile_pipes = 1;
-+ if (num_backends > R6XX_MAX_BACKENDS)
-+ num_backends = R6XX_MAX_BACKENDS;
-+ if (num_backends < 1)
-+ num_backends = 1;
-+
-+ enabled_backends_mask = 0;
-+ enabled_backends_count = 0;
-+ for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
-+ if (((backend_disable_mask >> i) & 1) == 0) {
-+ enabled_backends_mask |= (1 << i);
-+ ++enabled_backends_count;
-+ }
-+ if (enabled_backends_count == num_backends)
-+ break;
-+ }
-+
-+ if (enabled_backends_count == 0) {
-+ enabled_backends_mask = 1;
-+ enabled_backends_count = 1;
-+ }
-+
-+ if (enabled_backends_count != num_backends)
-+ num_backends = enabled_backends_count;
-+
-+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
-+ switch (num_tile_pipes) {
-+ case 1:
-+ swizzle_pipe[0] = 0;
-+ break;
-+ case 2:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 1;
-+ break;
-+ case 3:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 1;
-+ swizzle_pipe[2] = 2;
-+ break;
-+ case 4:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 1;
-+ swizzle_pipe[2] = 2;
-+ swizzle_pipe[3] = 3;
-+ break;
-+ case 5:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 1;
-+ swizzle_pipe[2] = 2;
-+ swizzle_pipe[3] = 3;
-+ swizzle_pipe[4] = 4;
-+ break;
-+ case 6:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 4;
-+ swizzle_pipe[3] = 5;
-+ swizzle_pipe[4] = 1;
-+ swizzle_pipe[5] = 3;
-+ break;
-+ case 7:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 4;
-+ swizzle_pipe[3] = 6;
-+ swizzle_pipe[4] = 1;
-+ swizzle_pipe[5] = 3;
-+ swizzle_pipe[6] = 5;
-+ break;
-+ case 8:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 4;
-+ swizzle_pipe[3] = 6;
-+ swizzle_pipe[4] = 1;
-+ swizzle_pipe[5] = 3;
-+ swizzle_pipe[6] = 5;
-+ swizzle_pipe[7] = 7;
-+ break;
-+ }
-+
-+ cur_backend = 0;
-+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
-+ cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-+
-+ backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-+
-+ cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-+ }
-+
-+ return backend_map;
-+}
-+
-+int r600_count_pipe_bits(uint32_t val)
-+{
-+ int i, ret = 0;
-+
-+ for (i = 0; i < 32; i++) {
-+ ret += val & 1;
-+ val >>= 1;
-+ }
-+ return ret;
- }
-
-+void r600_gpu_init(struct radeon_device *rdev)
-+{
-+ u32 tiling_config;
-+ u32 ramcfg;
-+ u32 tmp;
-+ int i, j;
-+ u32 sq_config;
-+ u32 sq_gpr_resource_mgmt_1 = 0;
-+ u32 sq_gpr_resource_mgmt_2 = 0;
-+ u32 sq_thread_resource_mgmt = 0;
-+ u32 sq_stack_resource_mgmt_1 = 0;
-+ u32 sq_stack_resource_mgmt_2 = 0;
-+
-+ /* FIXME: implement */
-+ switch (rdev->family) {
-+ case CHIP_R600:
-+ rdev->config.r600.max_pipes = 4;
-+ rdev->config.r600.max_tile_pipes = 8;
-+ rdev->config.r600.max_simds = 4;
-+ rdev->config.r600.max_backends = 4;
-+ rdev->config.r600.max_gprs = 256;
-+ rdev->config.r600.max_threads = 192;
-+ rdev->config.r600.max_stack_entries = 256;
-+ rdev->config.r600.max_hw_contexts = 8;
-+ rdev->config.r600.max_gs_threads = 16;
-+ rdev->config.r600.sx_max_export_size = 128;
-+ rdev->config.r600.sx_max_export_pos_size = 16;
-+ rdev->config.r600.sx_max_export_smx_size = 128;
-+ rdev->config.r600.sq_num_cf_insts = 2;
-+ break;
-+ case CHIP_RV630:
-+ case CHIP_RV635:
-+ rdev->config.r600.max_pipes = 2;
-+ rdev->config.r600.max_tile_pipes = 2;
-+ rdev->config.r600.max_simds = 3;
-+ rdev->config.r600.max_backends = 1;
-+ rdev->config.r600.max_gprs = 128;
-+ rdev->config.r600.max_threads = 192;
-+ rdev->config.r600.max_stack_entries = 128;
-+ rdev->config.r600.max_hw_contexts = 8;
-+ rdev->config.r600.max_gs_threads = 4;
-+ rdev->config.r600.sx_max_export_size = 128;
-+ rdev->config.r600.sx_max_export_pos_size = 16;
-+ rdev->config.r600.sx_max_export_smx_size = 128;
-+ rdev->config.r600.sq_num_cf_insts = 2;
-+ break;
-+ case CHIP_RV610:
-+ case CHIP_RV620:
-+ case CHIP_RS780:
-+ case CHIP_RS880:
-+ rdev->config.r600.max_pipes = 1;
-+ rdev->config.r600.max_tile_pipes = 1;
-+ rdev->config.r600.max_simds = 2;
-+ rdev->config.r600.max_backends = 1;
-+ rdev->config.r600.max_gprs = 128;
-+ rdev->config.r600.max_threads = 192;
-+ rdev->config.r600.max_stack_entries = 128;
-+ rdev->config.r600.max_hw_contexts = 4;
-+ rdev->config.r600.max_gs_threads = 4;
-+ rdev->config.r600.sx_max_export_size = 128;
-+ rdev->config.r600.sx_max_export_pos_size = 16;
-+ rdev->config.r600.sx_max_export_smx_size = 128;
-+ rdev->config.r600.sq_num_cf_insts = 1;
-+ break;
-+ case CHIP_RV670:
-+ rdev->config.r600.max_pipes = 4;
-+ rdev->config.r600.max_tile_pipes = 4;
-+ rdev->config.r600.max_simds = 4;
-+ rdev->config.r600.max_backends = 4;
-+ rdev->config.r600.max_gprs = 192;
-+ rdev->config.r600.max_threads = 192;
-+ rdev->config.r600.max_stack_entries = 256;
-+ rdev->config.r600.max_hw_contexts = 8;
-+ rdev->config.r600.max_gs_threads = 16;
-+ rdev->config.r600.sx_max_export_size = 128;
-+ rdev->config.r600.sx_max_export_pos_size = 16;
-+ rdev->config.r600.sx_max_export_smx_size = 128;
-+ rdev->config.r600.sq_num_cf_insts = 2;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ /* Initialize HDP */
-+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
-+ WREG32((0x2c14 + j), 0x00000000);
-+ WREG32((0x2c18 + j), 0x00000000);
-+ WREG32((0x2c1c + j), 0x00000000);
-+ WREG32((0x2c20 + j), 0x00000000);
-+ WREG32((0x2c24 + j), 0x00000000);
-+ }
-+
-+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
-+
-+ /* Setup tiling */
-+ tiling_config = 0;
-+ ramcfg = RREG32(RAMCFG);
-+ switch (rdev->config.r600.max_tile_pipes) {
-+ case 1:
-+ tiling_config |= PIPE_TILING(0);
-+ break;
-+ case 2:
-+ tiling_config |= PIPE_TILING(1);
-+ break;
-+ case 4:
-+ tiling_config |= PIPE_TILING(2);
-+ break;
-+ case 8:
-+ tiling_config |= PIPE_TILING(3);
-+ break;
-+ default:
-+ break;
-+ }
-+ tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
-+ tiling_config |= GROUP_SIZE(0);
-+ tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
-+ if (tmp > 3) {
-+ tiling_config |= ROW_TILING(3);
-+ tiling_config |= SAMPLE_SPLIT(3);
-+ } else {
-+ tiling_config |= ROW_TILING(tmp);
-+ tiling_config |= SAMPLE_SPLIT(tmp);
-+ }
-+ tiling_config |= BANK_SWAPS(1);
-+ tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
-+ rdev->config.r600.max_backends,
-+ (0xff << rdev->config.r600.max_backends) & 0xff);
-+ tiling_config |= BACKEND_MAP(tmp);
-+ WREG32(GB_TILING_CONFIG, tiling_config);
-+ WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
-+ WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
-+
-+ tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-+ WREG32(CC_RB_BACKEND_DISABLE, tmp);
-+
-+ /* Setup pipes */
-+ tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
-+ tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-+ WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
-+ WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
-+
-+ tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
-+ WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
-+ WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
-+
-+ /* Setup some CP states */
-+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
-+ WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
-+
-+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
-+ SYNC_WALKER | SYNC_ALIGNER));
-+ /* Setup various GPU states */
-+ if (rdev->family == CHIP_RV670)
-+ WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
-+
-+ tmp = RREG32(SX_DEBUG_1);
-+ tmp |= SMX_EVENT_RELEASE;
-+ if ((rdev->family > CHIP_R600))
-+ tmp |= ENABLE_NEW_SMX_ADDRESS;
-+ WREG32(SX_DEBUG_1, tmp);
-+
-+ if (((rdev->family) == CHIP_R600) ||
-+ ((rdev->family) == CHIP_RV630) ||
-+ ((rdev->family) == CHIP_RV610) ||
-+ ((rdev->family) == CHIP_RV620) ||
-+ ((rdev->family) == CHIP_RS780)) {
-+ WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
-+ } else {
-+ WREG32(DB_DEBUG, 0);
-+ }
-+ WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
-+ DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
-+
-+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
-+ WREG32(VGT_NUM_INSTANCES, 0);
-+
-+ WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
-+
-+ tmp = RREG32(SQ_MS_FIFO_SIZES);
-+ if (((rdev->family) == CHIP_RV610) ||
-+ ((rdev->family) == CHIP_RV620) ||
-+ ((rdev->family) == CHIP_RS780)) {
-+ tmp = (CACHE_FIFO_SIZE(0xa) |
-+ FETCH_FIFO_HIWATER(0xa) |
-+ DONE_FIFO_HIWATER(0xe0) |
-+ ALU_UPDATE_FIFO_HIWATER(0x8));
-+ } else if (((rdev->family) == CHIP_R600) ||
-+ ((rdev->family) == CHIP_RV630)) {
-+ tmp &= ~DONE_FIFO_HIWATER(0xff);
-+ tmp |= DONE_FIFO_HIWATER(0x4);
-+ }
-+ WREG32(SQ_MS_FIFO_SIZES, tmp);
-+
-+ /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
-+ * should be adjusted as needed by the 2D/3D drivers. This just sets default values
-+ */
-+ sq_config = RREG32(SQ_CONFIG);
-+ sq_config &= ~(PS_PRIO(3) |
-+ VS_PRIO(3) |
-+ GS_PRIO(3) |
-+ ES_PRIO(3));
-+ sq_config |= (DX9_CONSTS |
-+ VC_ENABLE |
-+ PS_PRIO(0) |
-+ VS_PRIO(1) |
-+ GS_PRIO(2) |
-+ ES_PRIO(3));
-+
-+ if ((rdev->family) == CHIP_R600) {
-+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
-+ NUM_VS_GPRS(124) |
-+ NUM_CLAUSE_TEMP_GPRS(4));
-+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
-+ NUM_ES_GPRS(0));
-+ sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
-+ NUM_VS_THREADS(48) |
-+ NUM_GS_THREADS(4) |
-+ NUM_ES_THREADS(4));
-+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
-+ NUM_VS_STACK_ENTRIES(128));
-+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
-+ NUM_ES_STACK_ENTRIES(0));
-+ } else if (((rdev->family) == CHIP_RV610) ||
-+ ((rdev->family) == CHIP_RV620) ||
-+ ((rdev->family) == CHIP_RS780)) {
-+ /* no vertex cache */
-+ sq_config &= ~VC_ENABLE;
-+
-+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
-+ NUM_VS_GPRS(44) |
-+ NUM_CLAUSE_TEMP_GPRS(2));
-+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
-+ NUM_ES_GPRS(17));
-+ sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
-+ NUM_VS_THREADS(78) |
-+ NUM_GS_THREADS(4) |
-+ NUM_ES_THREADS(31));
-+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
-+ NUM_VS_STACK_ENTRIES(40));
-+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
-+ NUM_ES_STACK_ENTRIES(16));
-+ } else if (((rdev->family) == CHIP_RV630) ||
-+ ((rdev->family) == CHIP_RV635)) {
-+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
-+ NUM_VS_GPRS(44) |
-+ NUM_CLAUSE_TEMP_GPRS(2));
-+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
-+ NUM_ES_GPRS(18));
-+ sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
-+ NUM_VS_THREADS(78) |
-+ NUM_GS_THREADS(4) |
-+ NUM_ES_THREADS(31));
-+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
-+ NUM_VS_STACK_ENTRIES(40));
-+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
-+ NUM_ES_STACK_ENTRIES(16));
-+ } else if ((rdev->family) == CHIP_RV670) {
-+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
-+ NUM_VS_GPRS(44) |
-+ NUM_CLAUSE_TEMP_GPRS(2));
-+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
-+ NUM_ES_GPRS(17));
-+ sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
-+ NUM_VS_THREADS(78) |
-+ NUM_GS_THREADS(4) |
-+ NUM_ES_THREADS(31));
-+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
-+ NUM_VS_STACK_ENTRIES(64));
-+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
-+ NUM_ES_STACK_ENTRIES(64));
-+ }
-+
-+ WREG32(SQ_CONFIG, sq_config);
-+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
-+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
-+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
-+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
-+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
-+
-+ if (((rdev->family) == CHIP_RV610) ||
-+ ((rdev->family) == CHIP_RV620) ||
-+ ((rdev->family) == CHIP_RS780)) {
-+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
-+ } else {
-+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
-+ }
-+
-+ /* More default values. 2D/3D driver should adjust as needed */
-+ WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
-+ S1_X(0x4) | S1_Y(0xc)));
-+ WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
-+ S1_X(0x2) | S1_Y(0x2) |
-+ S2_X(0xa) | S2_Y(0x6) |
-+ S3_X(0x6) | S3_Y(0xa)));
-+ WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
-+ S1_X(0x4) | S1_Y(0xc) |
-+ S2_X(0x1) | S2_Y(0x6) |
-+ S3_X(0xa) | S3_Y(0xe)));
-+ WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
-+ S5_X(0x0) | S5_Y(0x0) |
-+ S6_X(0xb) | S6_Y(0x4) |
-+ S7_X(0x7) | S7_Y(0x8)));
-+
-+ WREG32(VGT_STRMOUT_EN, 0);
-+ tmp = rdev->config.r600.max_pipes * 16;
-+ switch (rdev->family) {
-+ case CHIP_RV610:
-+ case CHIP_RS780:
-+ case CHIP_RV620:
-+ tmp += 32;
-+ break;
-+ case CHIP_RV670:
-+ tmp += 128;
-+ break;
-+ default:
-+ break;
-+ }
-+ if (tmp > 256) {
-+ tmp = 256;
-+ }
-+ WREG32(VGT_ES_PER_GS, 128);
-+ WREG32(VGT_GS_PER_ES, tmp);
-+ WREG32(VGT_GS_PER_VS, 2);
-+ WREG32(VGT_GS_VERTEX_REUSE, 16);
-+
-+ /* more default values. 2D/3D driver should adjust as needed */
-+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
-+ WREG32(VGT_STRMOUT_EN, 0);
-+ WREG32(SX_MISC, 0);
-+ WREG32(PA_SC_MODE_CNTL, 0);
-+ WREG32(PA_SC_AA_CONFIG, 0);
-+ WREG32(PA_SC_LINE_STIPPLE, 0);
-+ WREG32(SPI_INPUT_Z, 0);
-+ WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
-+ WREG32(CB_COLOR7_FRAG, 0);
-+
-+ /* Clear render buffer base addresses */
-+ WREG32(CB_COLOR0_BASE, 0);
-+ WREG32(CB_COLOR1_BASE, 0);
-+ WREG32(CB_COLOR2_BASE, 0);
-+ WREG32(CB_COLOR3_BASE, 0);
-+ WREG32(CB_COLOR4_BASE, 0);
-+ WREG32(CB_COLOR5_BASE, 0);
-+ WREG32(CB_COLOR6_BASE, 0);
-+ WREG32(CB_COLOR7_BASE, 0);
-+ WREG32(CB_COLOR7_FRAG, 0);
-+
-+ switch (rdev->family) {
-+ case CHIP_RV610:
-+ case CHIP_RS780:
-+ case CHIP_RV620:
-+ tmp = TC_L2_SIZE(8);
-+ break;
-+ case CHIP_RV630:
-+ case CHIP_RV635:
-+ tmp = TC_L2_SIZE(4);
-+ break;
-+ case CHIP_R600:
-+ tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
-+ break;
-+ default:
-+ tmp = TC_L2_SIZE(0);
-+ break;
-+ }
-+ WREG32(TC_CNTL, tmp);
-+
-+ tmp = RREG32(HDP_HOST_PATH_CNTL);
-+ WREG32(HDP_HOST_PATH_CNTL, tmp);
-+
-+ tmp = RREG32(ARB_POP);
-+ tmp |= ENABLE_TC128;
-+ WREG32(ARB_POP, tmp);
-+
-+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
-+ WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
-+ NUM_CLIP_SEQ(3)));
-+ WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
-+}
-+
-+
- /*
- * Indirect registers accessor
- */
--uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg)
-+u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
-+{
-+ u32 r;
-+
-+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
-+ (void)RREG32(PCIE_PORT_INDEX);
-+ r = RREG32(PCIE_PORT_DATA);
-+ return r;
-+}
-+
-+void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-+{
-+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
-+ (void)RREG32(PCIE_PORT_INDEX);
-+ WREG32(PCIE_PORT_DATA, (v));
-+ (void)RREG32(PCIE_PORT_DATA);
-+}
-+
-+
-+/*
-+ * CP & Ring
-+ */
-+void r600_cp_stop(struct radeon_device *rdev)
-+{
-+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
-+}
-+
-+int r600_cp_init_microcode(struct radeon_device *rdev)
-+{
-+ struct platform_device *pdev;
-+ const char *chip_name;
-+ size_t pfp_req_size, me_req_size;
-+ char fw_name[30];
-+ int err;
-+
-+ DRM_DEBUG("\n");
-+
-+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
-+ err = IS_ERR(pdev);
-+ if (err) {
-+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (rdev->family) {
-+ case CHIP_R600: chip_name = "R600"; break;
-+ case CHIP_RV610: chip_name = "RV610"; break;
-+ case CHIP_RV630: chip_name = "RV630"; break;
-+ case CHIP_RV620: chip_name = "RV620"; break;
-+ case CHIP_RV635: chip_name = "RV635"; break;
-+ case CHIP_RV670: chip_name = "RV670"; break;
-+ case CHIP_RS780:
-+ case CHIP_RS880: chip_name = "RS780"; break;
-+ case CHIP_RV770: chip_name = "RV770"; break;
-+ case CHIP_RV730:
-+ case CHIP_RV740: chip_name = "RV730"; break;
-+ case CHIP_RV710: chip_name = "RV710"; break;
-+ default: BUG();
-+ }
-+
-+ if (rdev->family >= CHIP_RV770) {
-+ pfp_req_size = R700_PFP_UCODE_SIZE * 4;
-+ me_req_size = R700_PM4_UCODE_SIZE * 4;
-+ } else {
-+ pfp_req_size = PFP_UCODE_SIZE * 4;
-+ me_req_size = PM4_UCODE_SIZE * 12;
-+ }
-+
-+ DRM_INFO("Loading %s CP Microcode\n", chip_name);
-+
-+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
-+ err = reject_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
-+ if (err)
-+ goto out;
-+ if (rdev->pfp_fw->size != pfp_req_size) {
-+ printk(KERN_ERR
-+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
-+ rdev->pfp_fw->size, fw_name);
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
-+ err = reject_firmware(&rdev->me_fw, fw_name, &pdev->dev);
-+ if (err)
-+ goto out;
-+ if (rdev->me_fw->size != me_req_size) {
-+ printk(KERN_ERR
-+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
-+ rdev->me_fw->size, fw_name);
-+ err = -EINVAL;
-+ }
-+out:
-+ platform_device_unregister(pdev);
-+
-+ if (err) {
-+ if (err != -EINVAL)
-+ printk(KERN_ERR
-+ "r600_cp: Failed to load firmware \"%s\"\n",
-+ fw_name);
-+ release_firmware(rdev->pfp_fw);
-+ rdev->pfp_fw = NULL;
-+ release_firmware(rdev->me_fw);
-+ rdev->me_fw = NULL;
-+ }
-+ return err;
-+}
-+
-+static int r600_cp_load_microcode(struct radeon_device *rdev)
-+{
-+ const __be32 *fw_data;
-+ int i;
-+
-+ if (!rdev->me_fw || !rdev->pfp_fw)
-+ return -EINVAL;
-+
-+ r600_cp_stop(rdev);
-+
-+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
-+
-+ /* Reset cp */
-+ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
-+ RREG32(GRBM_SOFT_RESET);
-+ mdelay(15);
-+ WREG32(GRBM_SOFT_RESET, 0);
-+
-+ WREG32(CP_ME_RAM_WADDR, 0);
-+
-+ fw_data = (const __be32 *)rdev->me_fw->data;
-+ WREG32(CP_ME_RAM_WADDR, 0);
-+ for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
-+ WREG32(CP_ME_RAM_DATA,
-+ be32_to_cpup(fw_data++));
-+
-+ fw_data = (const __be32 *)rdev->pfp_fw->data;
-+ WREG32(CP_PFP_UCODE_ADDR, 0);
-+ for (i = 0; i < PFP_UCODE_SIZE; i++)
-+ WREG32(CP_PFP_UCODE_DATA,
-+ be32_to_cpup(fw_data++));
-+
-+ WREG32(CP_PFP_UCODE_ADDR, 0);
-+ WREG32(CP_ME_RAM_WADDR, 0);
-+ WREG32(CP_ME_RAM_RADDR, 0);
-+ return 0;
-+}
-+
-+int r600_cp_start(struct radeon_device *rdev)
-+{
-+ int r;
-+ uint32_t cp_me;
-+
-+ r = radeon_ring_lock(rdev, 7);
-+ if (r) {
-+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-+ return r;
-+ }
-+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-+ radeon_ring_write(rdev, 0x1);
-+ if (rdev->family < CHIP_RV770) {
-+ radeon_ring_write(rdev, 0x3);
-+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
-+ } else {
-+ radeon_ring_write(rdev, 0x0);
-+ radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
-+ }
-+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-+ radeon_ring_write(rdev, 0);
-+ radeon_ring_write(rdev, 0);
-+ radeon_ring_unlock_commit(rdev);
-+
-+ cp_me = 0xff;
-+ WREG32(R_0086D8_CP_ME_CNTL, cp_me);
-+ return 0;
-+}
-+
-+int r600_cp_resume(struct radeon_device *rdev)
-+{
-+ u32 tmp;
-+ u32 rb_bufsz;
-+ int r;
-+
-+ /* Reset cp */
-+ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
-+ RREG32(GRBM_SOFT_RESET);
-+ mdelay(15);
-+ WREG32(GRBM_SOFT_RESET, 0);
-+
-+ /* Set ring buffer size */
-+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
-+#ifdef __BIG_ENDIAN
-+ WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
-+ (drm_order(4096/8) << 8) | rb_bufsz);
-+#else
-+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
-+#endif
-+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
-+
-+ /* Set the write pointer delay */
-+ WREG32(CP_RB_WPTR_DELAY, 0);
-+
-+ /* Initialize the ring buffer's read and write pointers */
-+ tmp = RREG32(CP_RB_CNTL);
-+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
-+ WREG32(CP_RB_RPTR_WR, 0);
-+ WREG32(CP_RB_WPTR, 0);
-+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
-+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
-+ mdelay(1);
-+ WREG32(CP_RB_CNTL, tmp);
-+
-+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
-+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
-+
-+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
-+ rdev->cp.wptr = RREG32(CP_RB_WPTR);
-+
-+ r600_cp_start(rdev);
-+ rdev->cp.ready = true;
-+ r = radeon_ring_test(rdev);
-+ if (r) {
-+ rdev->cp.ready = false;
-+ return r;
-+ }
-+ return 0;
-+}
-+
-+void r600_cp_commit(struct radeon_device *rdev)
-+{
-+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
-+ (void)RREG32(CP_RB_WPTR);
-+}
-+
-+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
-+{
-+ u32 rb_bufsz;
-+
-+ /* Align ring size */
-+ rb_bufsz = drm_order(ring_size / 8);
-+ ring_size = (1 << (rb_bufsz + 1)) * 4;
-+ rdev->cp.ring_size = ring_size;
-+ rdev->cp.align_mask = 16 - 1;
-+}
-+
-+
-+/*
-+ * GPU scratch registers helpers function.
-+ */
-+void r600_scratch_init(struct radeon_device *rdev)
-+{
-+ int i;
-+
-+ rdev->scratch.num_reg = 7;
-+ for (i = 0; i < rdev->scratch.num_reg; i++) {
-+ rdev->scratch.free[i] = true;
-+ rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
-+ }
-+}
-+
-+int r600_ring_test(struct radeon_device *rdev)
-+{
-+ uint32_t scratch;
-+ uint32_t tmp = 0;
-+ unsigned i;
-+ int r;
-+
-+ r = radeon_scratch_get(rdev, &scratch);
-+ if (r) {
-+ DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
-+ return r;
-+ }
-+ WREG32(scratch, 0xCAFEDEAD);
-+ r = radeon_ring_lock(rdev, 3);
-+ if (r) {
-+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-+ radeon_scratch_free(rdev, scratch);
-+ return r;
-+ }
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-+ radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-+ radeon_ring_write(rdev, 0xDEADBEEF);
-+ radeon_ring_unlock_commit(rdev);
-+ for (i = 0; i < rdev->usec_timeout; i++) {
-+ tmp = RREG32(scratch);
-+ if (tmp == 0xDEADBEEF)
-+ break;
-+ DRM_UDELAY(1);
-+ }
-+ if (i < rdev->usec_timeout) {
-+		DRM_INFO("ring test succeeded in %u usecs\n", i);
-+ } else {
-+ DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
-+ scratch, tmp);
-+ r = -EINVAL;
-+ }
-+ radeon_scratch_free(rdev, scratch);
-+ return r;
-+}
-+
-+/*
-+ * Writeback
-+ */
-+int r600_wb_init(struct radeon_device *rdev)
-+{
-+ int r;
-+
-+ if (rdev->wb.wb_obj == NULL) {
-+ r = radeon_object_create(rdev, NULL, 4096,
-+ true,
-+ RADEON_GEM_DOMAIN_GTT,
-+ false, &rdev->wb.wb_obj);
-+ if (r) {
-+ DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
-+ return r;
-+ }
-+ r = radeon_object_pin(rdev->wb.wb_obj,
-+ RADEON_GEM_DOMAIN_GTT,
-+ &rdev->wb.gpu_addr);
-+ if (r) {
-+ DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
-+ return r;
-+ }
-+ r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-+ if (r) {
-+ DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
-+ return r;
-+ }
-+ }
-+ WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
-+ WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
-+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
-+ WREG32(SCRATCH_UMSK, 0xff);
-+ return 0;
-+}
-+
-+void r600_wb_fini(struct radeon_device *rdev)
-+{
-+ if (rdev->wb.wb_obj) {
-+ radeon_object_kunmap(rdev->wb.wb_obj);
-+ radeon_object_unpin(rdev->wb.wb_obj);
-+ radeon_object_unref(&rdev->wb.wb_obj);
-+ rdev->wb.wb = NULL;
-+ rdev->wb.wb_obj = NULL;
-+ }
-+}
-+
-+
-+/*
-+ * CS
-+ */
-+void r600_fence_ring_emit(struct radeon_device *rdev,
-+ struct radeon_fence *fence)
-+{
-+ /* Emit fence sequence & fire IRQ */
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-+ radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-+ radeon_ring_write(rdev, fence->seq);
-+}
-+
-+int r600_copy_dma(struct radeon_device *rdev,
-+ uint64_t src_offset,
-+ uint64_t dst_offset,
-+ unsigned num_pages,
-+ struct radeon_fence *fence)
-+{
-+ /* FIXME: implement */
-+ return 0;
-+}
-+
-+int r600_copy_blit(struct radeon_device *rdev,
-+ uint64_t src_offset, uint64_t dst_offset,
-+ unsigned num_pages, struct radeon_fence *fence)
-+{
-+ r600_blit_prepare_copy(rdev, num_pages * 4096);
-+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
-+ r600_blit_done_copy(rdev, fence);
-+ return 0;
-+}
-+
-+int r600_irq_process(struct radeon_device *rdev)
-+{
-+ /* FIXME: implement */
-+ return 0;
-+}
-+
-+int r600_irq_set(struct radeon_device *rdev)
-+{
-+ /* FIXME: implement */
-+ return 0;
-+}
-+
-+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
-+ uint32_t tiling_flags, uint32_t pitch,
-+ uint32_t offset, uint32_t obj_size)
-+{
-+ /* FIXME: implement */
-+ return 0;
-+}
-+
-+void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
-+{
-+ /* FIXME: implement */
-+}
-+
-+
-+bool r600_card_posted(struct radeon_device *rdev)
-+{
-+ uint32_t reg;
-+
-+ /* first check CRTCs */
-+ reg = RREG32(D1CRTC_CONTROL) |
-+ RREG32(D2CRTC_CONTROL);
-+ if (reg & CRTC_EN)
-+ return true;
-+
-+ /* then check MEM_SIZE, in case the crtcs are off */
-+ if (RREG32(CONFIG_MEMSIZE))
-+ return true;
-+
-+ return false;
-+}
-+
-+int r600_resume(struct radeon_device *rdev)
-+{
-+ int r;
-+
-+ r600_gpu_reset(rdev);
-+ r600_mc_resume(rdev);
-+ r = r600_pcie_gart_enable(rdev);
-+ if (r)
-+ return r;
-+ r600_gpu_init(rdev);
-+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
-+ if (r)
-+ return r;
-+ r = r600_cp_load_microcode(rdev);
-+ if (r)
-+ return r;
-+ r = r600_cp_resume(rdev);
-+ if (r)
-+ return r;
-+ r = r600_wb_init(rdev);
-+ if (r)
-+ return r;
-+ return 0;
-+}
-+
-+int r600_suspend(struct radeon_device *rdev)
-+{
-+ /* FIXME: we should wait for ring to be empty */
-+ r600_cp_stop(rdev);
-+ return 0;
-+}
-+
-+/* The plan is to move initialization into this function and use
-+ * helper functions so that radeon_device_init does little more
-+ * than call the ASIC-specific functions. This should also allow
-+ * removing a bunch of callback functions like vram_info.
-+ */
-+int r600_init(struct radeon_device *rdev)
- {
-- uint32_t r;
-+ int r;
-
-- WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
-- (void)RREG32(R600_PCIE_PORT_INDEX);
-- r = RREG32(R600_PCIE_PORT_DATA);
-+ rdev->new_init_path = true;
-+ r = radeon_dummy_page_init(rdev);
-+ if (r)
-+ return r;
-+ if (r600_debugfs_mc_info_init(rdev)) {
-+ DRM_ERROR("Failed to register debugfs file for mc !\n");
-+ }
-+	/* This doesn't do much */
-+ r = radeon_gem_init(rdev);
-+ if (r)
-+ return r;
-+ /* Read BIOS */
-+ if (!radeon_get_bios(rdev)) {
-+ if (ASIC_IS_AVIVO(rdev))
-+ return -EINVAL;
-+ }
-+ /* Must be an ATOMBIOS */
-+ if (!rdev->is_atom_bios)
-+ return -EINVAL;
-+ r = radeon_atombios_init(rdev);
-+ if (r)
-+ return r;
-+ /* Post card if necessary */
-+ if (!r600_card_posted(rdev) && rdev->bios) {
-+ DRM_INFO("GPU not posted. posting now...\n");
-+ atom_asic_init(rdev->mode_info.atom_context);
-+ }
-+ /* Initialize scratch registers */
-+ r600_scratch_init(rdev);
-+ /* Initialize surface registers */
-+ radeon_surface_init(rdev);
-+ r = radeon_clocks_init(rdev);
-+ if (r)
-+ return r;
-+ /* Fence driver */
-+ r = radeon_fence_driver_init(rdev);
-+ if (r)
-+ return r;
-+ r = r600_mc_init(rdev);
-+ if (r) {
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ /* Retry with disabling AGP */
-+ r600_fini(rdev);
-+ rdev->flags &= ~RADEON_IS_AGP;
-+ return r600_init(rdev);
-+ }
-+ return r;
-+ }
-+ /* Memory manager */
-+ r = radeon_object_init(rdev);
-+ if (r)
-+ return r;
-+ rdev->cp.ring_obj = NULL;
-+ r600_ring_init(rdev, 1024 * 1024);
-+
-+ if (!rdev->me_fw || !rdev->pfp_fw) {
-+ r = r600_cp_init_microcode(rdev);
-+ if (r) {
-+ DRM_ERROR("Failed to load firmware!\n");
-+ return r;
-+ }
-+ }
-+
-+ r = r600_resume(rdev);
-+ if (r) {
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ /* Retry with disabling AGP */
-+ r600_fini(rdev);
-+ rdev->flags &= ~RADEON_IS_AGP;
-+ return r600_init(rdev);
-+ }
-+ return r;
-+ }
-+ r = radeon_ib_pool_init(rdev);
-+ if (r) {
-+		DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-+ return r;
-+ }
-+ r = r600_blit_init(rdev);
-+ if (r) {
-+		DRM_ERROR("radeon: failed initializing blitter (%d).\n", r);
-+ return r;
-+ }
-+ r = radeon_ib_test(rdev);
-+ if (r) {
-+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-+ return r;
-+ }
-+ return 0;
-+}
-+
-+void r600_fini(struct radeon_device *rdev)
-+{
-+ /* Suspend operations */
-+ r600_suspend(rdev);
-+
-+ r600_blit_fini(rdev);
-+ radeon_ring_fini(rdev);
-+ r600_pcie_gart_disable(rdev);
-+ radeon_gart_table_vram_free(rdev);
-+ radeon_gart_fini(rdev);
-+ radeon_gem_fini(rdev);
-+ radeon_fence_driver_fini(rdev);
-+ radeon_clocks_fini(rdev);
-+#if __OS_HAS_AGP
-+ if (rdev->flags & RADEON_IS_AGP)
-+ radeon_agp_fini(rdev);
-+#endif
-+ radeon_object_fini(rdev);
-+ if (rdev->is_atom_bios)
-+ radeon_atombios_fini(rdev);
-+ else
-+ radeon_combios_fini(rdev);
-+ kfree(rdev->bios);
-+ rdev->bios = NULL;
-+ radeon_dummy_page_fini(rdev);
-+}
-+
-+
-+/*
-+ * CS stuff
-+ */
-+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-+{
-+ /* FIXME: implement */
-+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-+ radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
-+ radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-+ radeon_ring_write(rdev, ib->length_dw);
-+}
-+
-+int r600_ib_test(struct radeon_device *rdev)
-+{
-+ struct radeon_ib *ib;
-+ uint32_t scratch;
-+ uint32_t tmp = 0;
-+ unsigned i;
-+ int r;
-+
-+ r = radeon_scratch_get(rdev, &scratch);
-+ if (r) {
-+ DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
-+ return r;
-+ }
-+ WREG32(scratch, 0xCAFEDEAD);
-+ r = radeon_ib_get(rdev, &ib);
-+ if (r) {
-+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
-+ return r;
-+ }
-+ ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
-+ ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-+ ib->ptr[2] = 0xDEADBEEF;
-+ ib->ptr[3] = PACKET2(0);
-+ ib->ptr[4] = PACKET2(0);
-+ ib->ptr[5] = PACKET2(0);
-+ ib->ptr[6] = PACKET2(0);
-+ ib->ptr[7] = PACKET2(0);
-+ ib->ptr[8] = PACKET2(0);
-+ ib->ptr[9] = PACKET2(0);
-+ ib->ptr[10] = PACKET2(0);
-+ ib->ptr[11] = PACKET2(0);
-+ ib->ptr[12] = PACKET2(0);
-+ ib->ptr[13] = PACKET2(0);
-+ ib->ptr[14] = PACKET2(0);
-+ ib->ptr[15] = PACKET2(0);
-+ ib->length_dw = 16;
-+ r = radeon_ib_schedule(rdev, ib);
-+ if (r) {
-+ radeon_scratch_free(rdev, scratch);
-+ radeon_ib_free(rdev, &ib);
-+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
-+ return r;
-+ }
-+ r = radeon_fence_wait(ib->fence, false);
-+ if (r) {
-+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
-+ return r;
-+ }
-+ for (i = 0; i < rdev->usec_timeout; i++) {
-+ tmp = RREG32(scratch);
-+ if (tmp == 0xDEADBEEF)
-+ break;
-+ DRM_UDELAY(1);
-+ }
-+ if (i < rdev->usec_timeout) {
-+ DRM_INFO("ib test succeeded in %u usecs\n", i);
-+ } else {
-+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
-+ scratch, tmp);
-+ r = -EINVAL;
-+ }
-+ radeon_scratch_free(rdev, scratch);
-+ radeon_ib_free(rdev, &ib);
- return r;
- }
-
--void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-+
-+
-+
-+/*
-+ * Debugfs info
-+ */
-+#if defined(CONFIG_DEBUG_FS)
-+
-+static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
- {
-- WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
-- (void)RREG32(R600_PCIE_PORT_INDEX);
-- WREG32(R600_PCIE_PORT_DATA, (v));
-- (void)RREG32(R600_PCIE_PORT_DATA);
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ uint32_t rdp, wdp;
-+ unsigned count, i, j;
-+
-+ radeon_ring_free_size(rdev);
-+ rdp = RREG32(CP_RB_RPTR);
-+ wdp = RREG32(CP_RB_WPTR);
-+ count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
-+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
-+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
-+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
-+ seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
-+ seq_printf(m, "%u dwords in ring\n", count);
-+ for (j = 0; j <= count; j++) {
-+ i = (rdp + j) & rdev->cp.ptr_mask;
-+ seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
-+ }
-+ return 0;
-+}
-+
-+static int r600_debugfs_mc_info(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *) m->private;
-+ struct drm_device *dev = node->minor->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+
-+ DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
-+ DREG32_SYS(m, rdev, VM_L2_STATUS);
-+ return 0;
-+}
-+
-+static struct drm_info_list r600_mc_info_list[] = {
-+ {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
-+ {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
-+};
-+#endif
-+
-+int r600_debugfs_mc_info_init(struct radeon_device *rdev)
-+{
-+#if defined(CONFIG_DEBUG_FS)
-+ return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
-+#else
-+ return 0;
-+#endif
- }
-diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
-new file mode 100644
-index 0000000..c51402e
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r600_blit.c
-@@ -0,0 +1,855 @@
-+/*
-+ * Copyright 2009 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Alex Deucher <alexander.deucher@amd.com>
-+ */
-+#include "drmP.h"
-+#include "drm.h"
-+#include "radeon_drm.h"
-+#include "radeon_drv.h"
-+
-+#include "r600_blit_shaders.h"
-+
-+#define DI_PT_RECTLIST 0x11
-+#define DI_INDEX_SIZE_16_BIT 0x0
-+#define DI_SRC_SEL_AUTO_INDEX 0x2
-+
-+#define FMT_8 0x1
-+#define FMT_5_6_5 0x8
-+#define FMT_8_8_8_8 0x1a
-+#define COLOR_8 0x1
-+#define COLOR_5_6_5 0x8
-+#define COLOR_8_8_8_8 0x1a
-+
-+static inline void
-+set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
-+{
-+ u32 cb_color_info;
-+ int pitch, slice;
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ h = (h + 7) & ~7;
-+ if (h < 8)
-+ h = 8;
-+
-+ cb_color_info = ((format << 2) | (1 << 27));
-+ pitch = (w / 8) - 1;
-+ slice = ((w * h) / 64) - 1;
-+
-+ if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
-+ BEGIN_RING(21 + 2);
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(gpu_addr >> 8);
-+ OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
-+ OUT_RING(2 << 0);
-+ } else {
-+ BEGIN_RING(21);
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(gpu_addr >> 8);
-+ }
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING((pitch << 0) | (slice << 10));
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(0);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(cb_color_info);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(0);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(0);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(0);
-+
-+ ADVANCE_RING();
-+}
-+
-+static inline void
-+cp_set_surface_sync(drm_radeon_private_t *dev_priv,
-+ u32 sync_type, u32 size, u64 mc_addr)
-+{
-+ u32 cp_coher_size;
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ if (size == 0xffffffff)
-+ cp_coher_size = 0xffffffff;
-+ else
-+ cp_coher_size = ((size + 255) >> 8);
-+
-+ BEGIN_RING(5);
-+ OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
-+ OUT_RING(sync_type);
-+ OUT_RING(cp_coher_size);
-+ OUT_RING((mc_addr >> 8));
-+ OUT_RING(10); /* poll interval */
-+ ADVANCE_RING();
-+}
-+
-+static inline void
-+set_shaders(struct drm_device *dev)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ u64 gpu_addr;
-+ int shader_size, i;
-+ u32 *vs, *ps;
-+ uint32_t sq_pgm_resources;
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ /* load shaders */
-+ vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
-+ ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
-+
-+ shader_size = r6xx_vs_size;
-+ for (i = 0; i < shader_size; i++)
-+ vs[i] = r6xx_vs[i];
-+ shader_size = r6xx_ps_size;
-+ for (i = 0; i < shader_size; i++)
-+ ps[i] = r6xx_ps[i];
-+
-+ dev_priv->blit_vb->used = 512;
-+
-+ gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;
-+
-+ /* setup shader regs */
-+ sq_pgm_resources = (1 << 0);
-+
-+ BEGIN_RING(9 + 12);
-+ /* VS */
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(gpu_addr >> 8);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(sq_pgm_resources);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(0);
-+
-+ /* PS */
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING((gpu_addr + 256) >> 8);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(sq_pgm_resources | (1 << 28));
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(2);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
-+ OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING(0);
-+ ADVANCE_RING();
-+
-+ cp_set_surface_sync(dev_priv,
-+ R600_SH_ACTION_ENA, 512, gpu_addr);
-+}
-+
-+static inline void
-+set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
-+{
-+ uint32_t sq_vtx_constant_word2;
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
-+
-+ BEGIN_RING(9);
-+ OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
-+ OUT_RING(0x460);
-+ OUT_RING(gpu_addr & 0xffffffff);
-+ OUT_RING(48 - 1);
-+ OUT_RING(sq_vtx_constant_word2);
-+ OUT_RING(1 << 0);
-+ OUT_RING(0);
-+ OUT_RING(0);
-+ OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
-+ ADVANCE_RING();
-+
-+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
-+ cp_set_surface_sync(dev_priv,
-+ R600_TC_ACTION_ENA, 48, gpu_addr);
-+ else
-+ cp_set_surface_sync(dev_priv,
-+ R600_VC_ACTION_ENA, 48, gpu_addr);
-+}
-+
-+static inline void
-+set_tex_resource(drm_radeon_private_t *dev_priv,
-+ int format, int w, int h, int pitch, u64 gpu_addr)
-+{
-+ uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ if (h < 1)
-+ h = 1;
-+
-+ sq_tex_resource_word0 = (1 << 0);
-+ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
-+ ((w - 1) << 19));
-+
-+ sq_tex_resource_word1 = (format << 26);
-+ sq_tex_resource_word1 |= ((h - 1) << 0);
-+
-+ sq_tex_resource_word4 = ((1 << 14) |
-+ (0 << 16) |
-+ (1 << 19) |
-+ (2 << 22) |
-+ (3 << 25));
-+
-+ BEGIN_RING(9);
-+ OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
-+ OUT_RING(0);
-+ OUT_RING(sq_tex_resource_word0);
-+ OUT_RING(sq_tex_resource_word1);
-+ OUT_RING(gpu_addr >> 8);
-+ OUT_RING(gpu_addr >> 8);
-+ OUT_RING(sq_tex_resource_word4);
-+ OUT_RING(0);
-+ OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
-+ ADVANCE_RING();
-+
-+}
-+
-+static inline void
-+set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
-+{
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ BEGIN_RING(12);
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
-+ OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING((x1 << 0) | (y1 << 16));
-+ OUT_RING((x2 << 0) | (y2 << 16));
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
-+ OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
-+ OUT_RING((x2 << 0) | (y2 << 16));
-+
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
-+ OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
-+ OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
-+ OUT_RING((x2 << 0) | (y2 << 16));
-+ ADVANCE_RING();
-+}
-+
-+static inline void
-+draw_auto(drm_radeon_private_t *dev_priv)
-+{
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ BEGIN_RING(10);
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
-+ OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
-+ OUT_RING(DI_PT_RECTLIST);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
-+ OUT_RING(DI_INDEX_SIZE_16_BIT);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
-+ OUT_RING(1);
-+
-+ OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
-+ OUT_RING(3);
-+ OUT_RING(DI_SRC_SEL_AUTO_INDEX);
-+
-+ ADVANCE_RING();
-+ COMMIT_RING();
-+}
-+
-+static inline void
-+set_default_state(drm_radeon_private_t *dev_priv)
-+{
-+ int default_state_dw, i;
-+ u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
-+ u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
-+ int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
-+ int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
-+ int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
-+ RING_LOCALS;
-+
-+ switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
-+ case CHIP_R600:
-+ num_ps_gprs = 192;
-+ num_vs_gprs = 56;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 136;
-+ num_vs_threads = 48;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 128;
-+ num_vs_stack_entries = 128;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ case CHIP_RV630:
-+ case CHIP_RV635:
-+ num_ps_gprs = 84;
-+ num_vs_gprs = 36;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 144;
-+ num_vs_threads = 40;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 40;
-+ num_vs_stack_entries = 40;
-+ num_gs_stack_entries = 32;
-+ num_es_stack_entries = 16;
-+ break;
-+ case CHIP_RV610:
-+ case CHIP_RV620:
-+ case CHIP_RS780:
-+ case CHIP_RS880:
-+ default:
-+ num_ps_gprs = 84;
-+ num_vs_gprs = 36;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 136;
-+ num_vs_threads = 48;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 40;
-+ num_vs_stack_entries = 40;
-+ num_gs_stack_entries = 32;
-+ num_es_stack_entries = 16;
-+ break;
-+ case CHIP_RV670:
-+ num_ps_gprs = 144;
-+ num_vs_gprs = 40;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 136;
-+ num_vs_threads = 48;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 40;
-+ num_vs_stack_entries = 40;
-+ num_gs_stack_entries = 32;
-+ num_es_stack_entries = 16;
-+ break;
-+ case CHIP_RV770:
-+ num_ps_gprs = 192;
-+ num_vs_gprs = 56;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 188;
-+ num_vs_threads = 60;
-+ num_gs_threads = 0;
-+ num_es_threads = 0;
-+ num_ps_stack_entries = 256;
-+ num_vs_stack_entries = 256;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ case CHIP_RV730:
-+ case CHIP_RV740:
-+ num_ps_gprs = 84;
-+ num_vs_gprs = 36;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 188;
-+ num_vs_threads = 60;
-+ num_gs_threads = 0;
-+ num_es_threads = 0;
-+ num_ps_stack_entries = 128;
-+ num_vs_stack_entries = 128;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ case CHIP_RV710:
-+ num_ps_gprs = 192;
-+ num_vs_gprs = 56;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 144;
-+ num_vs_threads = 48;
-+ num_gs_threads = 0;
-+ num_es_threads = 0;
-+ num_ps_stack_entries = 128;
-+ num_vs_stack_entries = 128;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ }
-+
-+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
-+ sq_config = 0;
-+ else
-+ sq_config = R600_VC_ENABLE;
-+
-+ sq_config |= (R600_DX9_CONSTS |
-+ R600_ALU_INST_PREFER_VECTOR |
-+ R600_PS_PRIO(0) |
-+ R600_VS_PRIO(1) |
-+ R600_GS_PRIO(2) |
-+ R600_ES_PRIO(3));
-+
-+ sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
-+ R600_NUM_VS_GPRS(num_vs_gprs) |
-+ R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
-+ sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
-+ R600_NUM_ES_GPRS(num_es_gprs));
-+ sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
-+ R600_NUM_VS_THREADS(num_vs_threads) |
-+ R600_NUM_GS_THREADS(num_gs_threads) |
-+ R600_NUM_ES_THREADS(num_es_threads));
-+ sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
-+ R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
-+ sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
-+ R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));
-+
-+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
-+ default_state_dw = r7xx_default_size * 4;
-+ BEGIN_RING(default_state_dw + 10);
-+ for (i = 0; i < default_state_dw; i++)
-+ OUT_RING(r7xx_default_state[i]);
-+ } else {
-+ default_state_dw = r6xx_default_size * 4;
-+ BEGIN_RING(default_state_dw + 10);
-+ for (i = 0; i < default_state_dw; i++)
-+ OUT_RING(r6xx_default_state[i]);
-+ }
-+ OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
-+ OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
-+ /* SQ config */
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
-+ OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
-+ OUT_RING(sq_config);
-+ OUT_RING(sq_gpr_resource_mgmt_1);
-+ OUT_RING(sq_gpr_resource_mgmt_2);
-+ OUT_RING(sq_thread_resource_mgmt);
-+ OUT_RING(sq_stack_resource_mgmt_1);
-+ OUT_RING(sq_stack_resource_mgmt_2);
-+ ADVANCE_RING();
-+}
-+
-+static inline uint32_t i2f(uint32_t input)
-+{
-+ u32 result, i, exponent, fraction;
-+
-+ if ((input & 0x3fff) == 0)
-+ result = 0; /* 0 is a special case */
-+ else {
-+ exponent = 140; /* exponent biased by 127; */
-+ fraction = (input & 0x3fff) << 10; /* cheat and only
-+ handle numbers below 2^^15 */
-+ for (i = 0; i < 14; i++) {
-+ if (fraction & 0x800000)
-+ break;
-+ else {
-+ fraction = fraction << 1; /* keep
-+ shifting left until top bit = 1 */
-+ exponent = exponent - 1;
-+ }
-+ }
-+ result = exponent << 23 | (fraction & 0x7fffff); /* mask
-+ off top bit; assumed 1 */
-+ }
-+ return result;
-+}
-+
-+
-+int r600_nomm_get_vb(struct drm_device *dev)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ dev_priv->blit_vb = radeon_freelist_get(dev);
-+ if (!dev_priv->blit_vb) {
-+ DRM_ERROR("Unable to allocate vertex buffer for blit\n");
-+ return -EAGAIN;
-+ }
-+ return 0;
-+}
-+
-+void r600_nomm_put_vb(struct drm_device *dev)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+
-+ dev_priv->blit_vb->used = 0;
-+ radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
-+}
-+
-+void *r600_nomm_get_vb_ptr(struct drm_device *dev)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ return (((char *)dev->agp_buffer_map->handle +
-+ dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
-+}
-+
-+int
-+r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ DRM_DEBUG("\n");
-+
-+ r600_nomm_get_vb(dev);
-+
-+ dev_priv->blit_vb->file_priv = file_priv;
-+
-+ set_default_state(dev_priv);
-+ set_shaders(dev);
-+
-+ return 0;
-+}
-+
-+
-+void
-+r600_done_blit_copy(struct drm_device *dev)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ BEGIN_RING(5);
-+ OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
-+ OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
-+ /* wait for 3D idle clean */
-+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
-+ OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
-+ OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
-+
-+ ADVANCE_RING();
-+ COMMIT_RING();
-+
-+ r600_nomm_put_vb(dev);
-+}
-+
-+void
-+r600_blit_copy(struct drm_device *dev,
-+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
-+ int size_bytes)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ int max_bytes;
-+ u64 vb_addr;
-+ u32 *vb;
-+
-+ vb = r600_nomm_get_vb_ptr(dev);
-+
-+ if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
-+ max_bytes = 8192;
-+
-+ while (size_bytes) {
-+ int cur_size = size_bytes;
-+ int src_x = src_gpu_addr & 255;
-+ int dst_x = dst_gpu_addr & 255;
-+ int h = 1;
-+ src_gpu_addr = src_gpu_addr & ~255;
-+ dst_gpu_addr = dst_gpu_addr & ~255;
-+
-+ if (!src_x && !dst_x) {
-+ h = (cur_size / max_bytes);
-+ if (h > 8192)
-+ h = 8192;
-+ if (h == 0)
-+ h = 1;
-+ else
-+ cur_size = max_bytes;
-+ } else {
-+ if (cur_size > max_bytes)
-+ cur_size = max_bytes;
-+ if (cur_size > (max_bytes - dst_x))
-+ cur_size = (max_bytes - dst_x);
-+ if (cur_size > (max_bytes - src_x))
-+ cur_size = (max_bytes - src_x);
-+ }
-+
-+ if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
-+
-+ r600_nomm_put_vb(dev);
-+ r600_nomm_get_vb(dev);
-+ if (!dev_priv->blit_vb)
-+ return;
-+ set_shaders(dev);
-+ vb = r600_nomm_get_vb_ptr(dev);
-+ }
-+
-+ vb[0] = i2f(dst_x);
-+ vb[1] = 0;
-+ vb[2] = i2f(src_x);
-+ vb[3] = 0;
-+
-+ vb[4] = i2f(dst_x);
-+ vb[5] = i2f(h);
-+ vb[6] = i2f(src_x);
-+ vb[7] = i2f(h);
-+
-+ vb[8] = i2f(dst_x + cur_size);
-+ vb[9] = i2f(h);
-+ vb[10] = i2f(src_x + cur_size);
-+ vb[11] = i2f(h);
-+
-+ /* src */
-+ set_tex_resource(dev_priv, FMT_8,
-+ src_x + cur_size, h, src_x + cur_size,
-+ src_gpu_addr);
-+
-+ cp_set_surface_sync(dev_priv,
-+ R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
-+
-+ /* dst */
-+ set_render_target(dev_priv, COLOR_8,
-+ dst_x + cur_size, h,
-+ dst_gpu_addr);
-+
-+ /* scissors */
-+ set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);
-+
-+ /* Vertex buffer setup */
-+ vb_addr = dev_priv->gart_buffers_offset +
-+ dev_priv->blit_vb->offset +
-+ dev_priv->blit_vb->used;
-+ set_vtx_resource(dev_priv, vb_addr);
-+
-+ /* draw */
-+ draw_auto(dev_priv);
-+
-+ cp_set_surface_sync(dev_priv,
-+ R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
-+ cur_size * h, dst_gpu_addr);
-+
-+ vb += 12;
-+ dev_priv->blit_vb->used += 12 * 4;
-+
-+ src_gpu_addr += cur_size * h;
-+ dst_gpu_addr += cur_size * h;
-+ size_bytes -= cur_size * h;
-+ }
-+ } else {
-+ max_bytes = 8192 * 4;
-+
-+ while (size_bytes) {
-+ int cur_size = size_bytes;
-+ int src_x = (src_gpu_addr & 255);
-+ int dst_x = (dst_gpu_addr & 255);
-+ int h = 1;
-+ src_gpu_addr = src_gpu_addr & ~255;
-+ dst_gpu_addr = dst_gpu_addr & ~255;
-+
-+ if (!src_x && !dst_x) {
-+ h = (cur_size / max_bytes);
-+ if (h > 8192)
-+ h = 8192;
-+ if (h == 0)
-+ h = 1;
-+ else
-+ cur_size = max_bytes;
-+ } else {
-+ if (cur_size > max_bytes)
-+ cur_size = max_bytes;
-+ if (cur_size > (max_bytes - dst_x))
-+ cur_size = (max_bytes - dst_x);
-+ if (cur_size > (max_bytes - src_x))
-+ cur_size = (max_bytes - src_x);
-+ }
-+
-+ if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
-+ r600_nomm_put_vb(dev);
-+ r600_nomm_get_vb(dev);
-+ if (!dev_priv->blit_vb)
-+ return;
-+
-+ set_shaders(dev);
-+ vb = r600_nomm_get_vb_ptr(dev);
-+ }
-+
-+ vb[0] = i2f(dst_x / 4);
-+ vb[1] = 0;
-+ vb[2] = i2f(src_x / 4);
-+ vb[3] = 0;
-+
-+ vb[4] = i2f(dst_x / 4);
-+ vb[5] = i2f(h);
-+ vb[6] = i2f(src_x / 4);
-+ vb[7] = i2f(h);
-+
-+ vb[8] = i2f((dst_x + cur_size) / 4);
-+ vb[9] = i2f(h);
-+ vb[10] = i2f((src_x + cur_size) / 4);
-+ vb[11] = i2f(h);
-+
-+ /* src */
-+ set_tex_resource(dev_priv, FMT_8_8_8_8,
-+ (src_x + cur_size) / 4,
-+ h, (src_x + cur_size) / 4,
-+ src_gpu_addr);
-+
-+ cp_set_surface_sync(dev_priv,
-+ R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
-+
-+ /* dst */
-+ set_render_target(dev_priv, COLOR_8_8_8_8,
-+ dst_x + cur_size, h,
-+ dst_gpu_addr);
-+
-+ /* scissors */
-+ set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
-+
-+ /* Vertex buffer setup */
-+ vb_addr = dev_priv->gart_buffers_offset +
-+ dev_priv->blit_vb->offset +
-+ dev_priv->blit_vb->used;
-+ set_vtx_resource(dev_priv, vb_addr);
-+
-+ /* draw */
-+ draw_auto(dev_priv);
-+
-+ cp_set_surface_sync(dev_priv,
-+ R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
-+ cur_size * h, dst_gpu_addr);
-+
-+ vb += 12;
-+ dev_priv->blit_vb->used += 12 * 4;
-+
-+ src_gpu_addr += cur_size * h;
-+ dst_gpu_addr += cur_size * h;
-+ size_bytes -= cur_size * h;
-+ }
-+ }
-+}
-+
-+void
-+r600_blit_swap(struct drm_device *dev,
-+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
-+ int sx, int sy, int dx, int dy,
-+ int w, int h, int src_pitch, int dst_pitch, int cpp)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ int cb_format, tex_format;
-+ u64 vb_addr;
-+ u32 *vb;
-+
-+ vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
-+ dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
-+
-+ if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
-+
-+ r600_nomm_put_vb(dev);
-+ r600_nomm_get_vb(dev);
-+ if (!dev_priv->blit_vb)
-+ return;
-+
-+ set_shaders(dev);
-+ vb = r600_nomm_get_vb_ptr(dev);
-+ }
-+
-+ if (cpp == 4) {
-+ cb_format = COLOR_8_8_8_8;
-+ tex_format = FMT_8_8_8_8;
-+ } else if (cpp == 2) {
-+ cb_format = COLOR_5_6_5;
-+ tex_format = FMT_5_6_5;
-+ } else {
-+ cb_format = COLOR_8;
-+ tex_format = FMT_8;
-+ }
-+
-+ vb[0] = i2f(dx);
-+ vb[1] = i2f(dy);
-+ vb[2] = i2f(sx);
-+ vb[3] = i2f(sy);
-+
-+ vb[4] = i2f(dx);
-+ vb[5] = i2f(dy + h);
-+ vb[6] = i2f(sx);
-+ vb[7] = i2f(sy + h);
-+
-+ vb[8] = i2f(dx + w);
-+ vb[9] = i2f(dy + h);
-+ vb[10] = i2f(sx + w);
-+ vb[11] = i2f(sy + h);
-+
-+ /* src */
-+ set_tex_resource(dev_priv, tex_format,
-+ src_pitch / cpp,
-+ sy + h, src_pitch / cpp,
-+ src_gpu_addr);
-+
-+ cp_set_surface_sync(dev_priv,
-+ R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr);
-+
-+ /* dst */
-+ set_render_target(dev_priv, cb_format,
-+ dst_pitch / cpp, dy + h,
-+ dst_gpu_addr);
-+
-+ /* scissors */
-+ set_scissors(dev_priv, dx, dy, dx + w, dy + h);
-+
-+ /* Vertex buffer setup */
-+ vb_addr = dev_priv->gart_buffers_offset +
-+ dev_priv->blit_vb->offset +
-+ dev_priv->blit_vb->used;
-+ set_vtx_resource(dev_priv, vb_addr);
-+
-+ /* draw */
-+ draw_auto(dev_priv);
-+
-+ cp_set_surface_sync(dev_priv,
-+ R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
-+ dst_pitch * (dy + h), dst_gpu_addr);
-+
-+ dev_priv->blit_vb->used += 12 * 4;
-+}
-diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
-new file mode 100644
-index 0000000..5755647
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
-@@ -0,0 +1,777 @@
-+#include "drmP.h"
-+#include "drm.h"
-+#include "radeon_drm.h"
-+#include "radeon.h"
-+
-+#include "r600d.h"
-+#include "r600_blit_shaders.h"
-+
-+#define DI_PT_RECTLIST 0x11
-+#define DI_INDEX_SIZE_16_BIT 0x0
-+#define DI_SRC_SEL_AUTO_INDEX 0x2
-+
-+#define FMT_8 0x1
-+#define FMT_5_6_5 0x8
-+#define FMT_8_8_8_8 0x1a
-+#define COLOR_8 0x1
-+#define COLOR_5_6_5 0x8
-+#define COLOR_8_8_8_8 0x1a
-+
-+/* emits 21 on rv770+, 23 on r600 */
-+static void
-+set_render_target(struct radeon_device *rdev, int format,
-+ int w, int h, u64 gpu_addr)
-+{
-+ u32 cb_color_info;
-+ int pitch, slice;
-+
-+ h = (h + 7) & ~7;
-+ if (h < 8)
-+ h = 8;
-+
-+ cb_color_info = ((format << 2) | (1 << 27));
-+ pitch = (w / 8) - 1;
-+ slice = ((w * h) / 64) - 1;
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, gpu_addr >> 8);
-+
-+ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
-+ radeon_ring_write(rdev, 2 << 0);
-+ }
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, 0);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, cb_color_info);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, 0);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, 0);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, 0);
-+}
-+
-+/* emits 5dw */
-+static void
-+cp_set_surface_sync(struct radeon_device *rdev,
-+ u32 sync_type, u32 size,
-+ u64 mc_addr)
-+{
-+ u32 cp_coher_size;
-+
-+ if (size == 0xffffffff)
-+ cp_coher_size = 0xffffffff;
-+ else
-+ cp_coher_size = ((size + 255) >> 8);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
-+ radeon_ring_write(rdev, sync_type);
-+ radeon_ring_write(rdev, cp_coher_size);
-+ radeon_ring_write(rdev, mc_addr >> 8);
-+ radeon_ring_write(rdev, 10); /* poll interval */
-+}
-+
-+/* emits 21dw + 1 surface sync = 26dw */
-+static void
-+set_shaders(struct radeon_device *rdev)
-+{
-+ u64 gpu_addr;
-+ u32 sq_pgm_resources;
-+
-+ /* setup shader regs */
-+ sq_pgm_resources = (1 << 0);
-+
-+ /* VS */
-+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, gpu_addr >> 8);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, sq_pgm_resources);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, 0);
-+
-+ /* PS */
-+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, gpu_addr >> 8);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, 2);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
-+ radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, 0);
-+
-+ cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
-+}
-+
-+/* emits 9 + 1 sync (5) = 14 */
-+static void
-+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
-+{
-+ u32 sq_vtx_constant_word2;
-+
-+ sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-+ radeon_ring_write(rdev, 0x460);
-+ radeon_ring_write(rdev, gpu_addr & 0xffffffff);
-+ radeon_ring_write(rdev, 48 - 1);
-+ radeon_ring_write(rdev, sq_vtx_constant_word2);
-+ radeon_ring_write(rdev, 1 << 0);
-+ radeon_ring_write(rdev, 0);
-+ radeon_ring_write(rdev, 0);
-+ radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
-+
-+ if ((rdev->family == CHIP_RV610) ||
-+ (rdev->family == CHIP_RV620) ||
-+ (rdev->family == CHIP_RS780) ||
-+ (rdev->family == CHIP_RS880) ||
-+ (rdev->family == CHIP_RV710))
-+ cp_set_surface_sync(rdev,
-+ PACKET3_TC_ACTION_ENA, 48, gpu_addr);
-+ else
-+ cp_set_surface_sync(rdev,
-+ PACKET3_VC_ACTION_ENA, 48, gpu_addr);
-+}
-+
-+/* emits 9 */
-+static void
-+set_tex_resource(struct radeon_device *rdev,
-+ int format, int w, int h, int pitch,
-+ u64 gpu_addr)
-+{
-+ uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
-+
-+ if (h < 1)
-+ h = 1;
-+
-+ sq_tex_resource_word0 = (1 << 0);
-+ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
-+ ((w - 1) << 19));
-+
-+ sq_tex_resource_word1 = (format << 26);
-+ sq_tex_resource_word1 |= ((h - 1) << 0);
-+
-+ sq_tex_resource_word4 = ((1 << 14) |
-+ (0 << 16) |
-+ (1 << 19) |
-+ (2 << 22) |
-+ (3 << 25));
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
-+ radeon_ring_write(rdev, 0);
-+ radeon_ring_write(rdev, sq_tex_resource_word0);
-+ radeon_ring_write(rdev, sq_tex_resource_word1);
-+ radeon_ring_write(rdev, gpu_addr >> 8);
-+ radeon_ring_write(rdev, gpu_addr >> 8);
-+ radeon_ring_write(rdev, sq_tex_resource_word4);
-+ radeon_ring_write(rdev, 0);
-+ radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
-+}
-+
-+/* emits 12 */
-+static void
-+set_scissors(struct radeon_device *rdev, int x1, int y1,
-+ int x2, int y2)
-+{
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-+ radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
-+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-+ radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-+ radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
-+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-+}
-+
-+/* emits 10 */
-+static void
-+draw_auto(struct radeon_device *rdev)
-+{
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-+ radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, DI_PT_RECTLIST);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-+ radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
-+ radeon_ring_write(rdev, 1);
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-+ radeon_ring_write(rdev, 3);
-+ radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
-+
-+}
-+
-+/* emits 14 */
-+static void
-+set_default_state(struct radeon_device *rdev)
-+{
-+ u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
-+ u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
-+ int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
-+ int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
-+ int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
-+ u64 gpu_addr;
-+
-+ switch (rdev->family) {
-+ case CHIP_R600:
-+ num_ps_gprs = 192;
-+ num_vs_gprs = 56;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 136;
-+ num_vs_threads = 48;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 128;
-+ num_vs_stack_entries = 128;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ case CHIP_RV630:
-+ case CHIP_RV635:
-+ num_ps_gprs = 84;
-+ num_vs_gprs = 36;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 144;
-+ num_vs_threads = 40;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 40;
-+ num_vs_stack_entries = 40;
-+ num_gs_stack_entries = 32;
-+ num_es_stack_entries = 16;
-+ break;
-+ case CHIP_RV610:
-+ case CHIP_RV620:
-+ case CHIP_RS780:
-+ case CHIP_RS880:
-+ default:
-+ num_ps_gprs = 84;
-+ num_vs_gprs = 36;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 136;
-+ num_vs_threads = 48;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 40;
-+ num_vs_stack_entries = 40;
-+ num_gs_stack_entries = 32;
-+ num_es_stack_entries = 16;
-+ break;
-+ case CHIP_RV670:
-+ num_ps_gprs = 144;
-+ num_vs_gprs = 40;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 136;
-+ num_vs_threads = 48;
-+ num_gs_threads = 4;
-+ num_es_threads = 4;
-+ num_ps_stack_entries = 40;
-+ num_vs_stack_entries = 40;
-+ num_gs_stack_entries = 32;
-+ num_es_stack_entries = 16;
-+ break;
-+ case CHIP_RV770:
-+ num_ps_gprs = 192;
-+ num_vs_gprs = 56;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 188;
-+ num_vs_threads = 60;
-+ num_gs_threads = 0;
-+ num_es_threads = 0;
-+ num_ps_stack_entries = 256;
-+ num_vs_stack_entries = 256;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ case CHIP_RV730:
-+ case CHIP_RV740:
-+ num_ps_gprs = 84;
-+ num_vs_gprs = 36;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 188;
-+ num_vs_threads = 60;
-+ num_gs_threads = 0;
-+ num_es_threads = 0;
-+ num_ps_stack_entries = 128;
-+ num_vs_stack_entries = 128;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ case CHIP_RV710:
-+ num_ps_gprs = 192;
-+ num_vs_gprs = 56;
-+ num_temp_gprs = 4;
-+ num_gs_gprs = 0;
-+ num_es_gprs = 0;
-+ num_ps_threads = 144;
-+ num_vs_threads = 48;
-+ num_gs_threads = 0;
-+ num_es_threads = 0;
-+ num_ps_stack_entries = 128;
-+ num_vs_stack_entries = 128;
-+ num_gs_stack_entries = 0;
-+ num_es_stack_entries = 0;
-+ break;
-+ }
-+
-+ if ((rdev->family == CHIP_RV610) ||
-+ (rdev->family == CHIP_RV620) ||
-+ (rdev->family == CHIP_RS780) ||
-+	    (rdev->family == CHIP_RS880) ||
-+ (rdev->family == CHIP_RV710))
-+ sq_config = 0;
-+ else
-+ sq_config = VC_ENABLE;
-+
-+ sq_config |= (DX9_CONSTS |
-+ ALU_INST_PREFER_VECTOR |
-+ PS_PRIO(0) |
-+ VS_PRIO(1) |
-+ GS_PRIO(2) |
-+ ES_PRIO(3));
-+
-+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
-+ NUM_VS_GPRS(num_vs_gprs) |
-+ NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
-+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
-+ NUM_ES_GPRS(num_es_gprs));
-+ sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
-+ NUM_VS_THREADS(num_vs_threads) |
-+ NUM_GS_THREADS(num_gs_threads) |
-+ NUM_ES_THREADS(num_es_threads));
-+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
-+ NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
-+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
-+ NUM_ES_STACK_ENTRIES(num_es_stack_entries));
-+
-+ /* emit an IB pointing at default state */
-+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-+ radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
-+ radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
-+ radeon_ring_write(rdev, (rdev->r600_blit.state_len / 4));
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
-+ radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
-+ /* SQ config */
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
-+ radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, sq_config);
-+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-+ radeon_ring_write(rdev, sq_thread_resource_mgmt);
-+ radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-+ radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
-+}
-+
-+static inline uint32_t i2f(uint32_t input)
-+{
-+ u32 result, i, exponent, fraction;
-+
-+ if ((input & 0x3fff) == 0)
-+ result = 0; /* 0 is a special case */
-+ else {
-+ exponent = 140; /* exponent biased by 127; */
-+ fraction = (input & 0x3fff) << 10; /* cheat and only
-+ handle numbers below 2^^15 */
-+ for (i = 0; i < 14; i++) {
-+ if (fraction & 0x800000)
-+ break;
-+ else {
-+ fraction = fraction << 1; /* keep
-+ shifting left until top bit = 1 */
-+ exponent = exponent - 1;
-+ }
-+ }
-+ result = exponent << 23 | (fraction & 0x7fffff); /* mask
-+ off top bit; assumed 1 */
-+ }
-+ return result;
-+}
-+
-+int r600_blit_init(struct radeon_device *rdev)
-+{
-+ u32 obj_size;
-+ int r;
-+ void *ptr;
-+
-+ rdev->r600_blit.state_offset = 0;
-+
-+ if (rdev->family >= CHIP_RV770)
-+ rdev->r600_blit.state_len = r7xx_default_size * 4;
-+ else
-+ rdev->r600_blit.state_len = r6xx_default_size * 4;
-+
-+ obj_size = rdev->r600_blit.state_len;
-+ obj_size = ALIGN(obj_size, 256);
-+
-+ rdev->r600_blit.vs_offset = obj_size;
-+ obj_size += r6xx_vs_size * 4;
-+ obj_size = ALIGN(obj_size, 256);
-+
-+ rdev->r600_blit.ps_offset = obj_size;
-+ obj_size += r6xx_ps_size * 4;
-+ obj_size = ALIGN(obj_size, 256);
-+
-+ r = radeon_object_create(rdev, NULL, obj_size,
-+ true, RADEON_GEM_DOMAIN_VRAM,
-+ false, &rdev->r600_blit.shader_obj);
-+ if (r) {
-+ DRM_ERROR("r600 failed to allocate shader\n");
-+ return r;
-+ }
-+
-+ r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-+ &rdev->r600_blit.shader_gpu_addr);
-+ if (r) {
-+ DRM_ERROR("failed to pin blit object %d\n", r);
-+ return r;
-+ }
-+
-+ DRM_DEBUG("r6xx blit allocated bo @ 0x%16llx %08x vs %08x ps %08x\n",
-+ rdev->r600_blit.shader_gpu_addr, obj_size,
-+ rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
-+
-+ r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
-+ if (r) {
-+ DRM_ERROR("failed to map blit object %d\n", r);
-+ return r;
-+ }
-+
-+ if (rdev->family >= CHIP_RV770)
-+ memcpy_toio(ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len);
-+ else
-+ memcpy_toio(ptr + rdev->r600_blit.state_offset, r6xx_default_state, rdev->r600_blit.state_len);
-+
-+ memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
-+ memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-+
-+ radeon_object_kunmap(rdev->r600_blit.shader_obj);
-+ return 0;
-+}
-+
-+void r600_blit_fini(struct radeon_device *rdev)
-+{
-+ radeon_object_unpin(rdev->r600_blit.shader_obj);
-+ radeon_object_unref(&rdev->r600_blit.shader_obj);
-+}
-+
-+int r600_vb_ib_get(struct radeon_device *rdev)
-+{
-+ int r;
-+ r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
-+ if (r) {
-+ DRM_ERROR("failed to get IB for vertex buffer\n");
-+ return r;
-+ }
-+
-+ rdev->r600_blit.vb_total = 64*1024;
-+ rdev->r600_blit.vb_used = 0;
-+ return 0;
-+}
-+
-+void r600_vb_ib_put(struct radeon_device *rdev)
-+{
-+ mutex_lock(&rdev->ib_pool.mutex);
-+ radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-+ list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-+ mutex_unlock(&rdev->ib_pool.mutex);
-+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
-+}
-+
-+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
-+{
-+ int r;
-+ int ring_size;
-+ const int max_size = 8192*8192;
-+
-+ r = r600_vb_ib_get(rdev);
-+ WARN_ON(r);
-+
-+ /* loops of emits 64 + fence emit possible */
-+ ring_size = ((size_bytes + max_size) / max_size) * 78;
-+ /* set default + shaders */
-+ ring_size += 40; /* shaders + def state */
-+ ring_size += 3; /* fence emit for VB IB */
-+ ring_size += 5; /* done copy */
-+ ring_size += 3; /* fence emit for done copy */
-+ r = radeon_ring_lock(rdev, ring_size);
-+ WARN_ON(r);
-+
-+ set_default_state(rdev); /* 14 */
-+ set_shaders(rdev); /* 26 */
-+ return 0;
-+}
-+
-+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
-+{
-+ int r;
-+
-+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
-+ radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
-+ /* wait for 3D idle clean */
-+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
-+
-+ if (rdev->r600_blit.vb_ib)
-+ r600_vb_ib_put(rdev);
-+
-+ if (fence)
-+ r = radeon_fence_emit(rdev, fence);
-+
-+ radeon_ring_unlock_commit(rdev);
-+}
-+
-+void r600_kms_blit_copy(struct radeon_device *rdev,
-+ u64 src_gpu_addr, u64 dst_gpu_addr,
-+ int size_bytes)
-+{
-+ int max_bytes;
-+ u64 vb_gpu_addr;
-+ u32 *vb;
-+
-+ DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
-+ size_bytes, rdev->r600_blit.vb_used);
-+ vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
-+ if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
-+ max_bytes = 8192;
-+
-+ while (size_bytes) {
-+ int cur_size = size_bytes;
-+ int src_x = src_gpu_addr & 255;
-+ int dst_x = dst_gpu_addr & 255;
-+ int h = 1;
-+ src_gpu_addr = src_gpu_addr & ~255;
-+ dst_gpu_addr = dst_gpu_addr & ~255;
-+
-+ if (!src_x && !dst_x) {
-+ h = (cur_size / max_bytes);
-+ if (h > 8192)
-+ h = 8192;
-+ if (h == 0)
-+ h = 1;
-+ else
-+ cur_size = max_bytes;
-+ } else {
-+ if (cur_size > max_bytes)
-+ cur_size = max_bytes;
-+ if (cur_size > (max_bytes - dst_x))
-+ cur_size = (max_bytes - dst_x);
-+ if (cur_size > (max_bytes - src_x))
-+ cur_size = (max_bytes - src_x);
-+ }
-+
-+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
-+ WARN_ON(1);
-+
-+#if 0
-+ r600_vb_ib_put(rdev);
-+
-+ r600_nomm_put_vb(dev);
-+ r600_nomm_get_vb(dev);
-+ if (!dev_priv->blit_vb)
-+ return;
-+ set_shaders(dev);
-+ vb = r600_nomm_get_vb_ptr(dev);
-+#endif
-+ }
-+
-+ vb[0] = i2f(dst_x);
-+ vb[1] = 0;
-+ vb[2] = i2f(src_x);
-+ vb[3] = 0;
-+
-+ vb[4] = i2f(dst_x);
-+ vb[5] = i2f(h);
-+ vb[6] = i2f(src_x);
-+ vb[7] = i2f(h);
-+
-+ vb[8] = i2f(dst_x + cur_size);
-+ vb[9] = i2f(h);
-+ vb[10] = i2f(src_x + cur_size);
-+ vb[11] = i2f(h);
-+
-+ /* src 9 */
-+ set_tex_resource(rdev, FMT_8,
-+ src_x + cur_size, h, src_x + cur_size,
-+ src_gpu_addr);
-+
-+ /* 5 */
-+ cp_set_surface_sync(rdev,
-+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
-+
-+ /* dst 23 */
-+ set_render_target(rdev, COLOR_8,
-+ dst_x + cur_size, h,
-+ dst_gpu_addr);
-+
-+ /* scissors 12 */
-+ set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
-+
-+ /* 14 */
-+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
-+ set_vtx_resource(rdev, vb_gpu_addr);
-+
-+ /* draw 10 */
-+ draw_auto(rdev);
-+
-+ /* 5 */
-+ cp_set_surface_sync(rdev,
-+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
-+ cur_size * h, dst_gpu_addr);
-+
-+ vb += 12;
-+ rdev->r600_blit.vb_used += 12 * 4;
-+
-+ src_gpu_addr += cur_size * h;
-+ dst_gpu_addr += cur_size * h;
-+ size_bytes -= cur_size * h;
-+ }
-+ } else {
-+ max_bytes = 8192 * 4;
-+
-+ while (size_bytes) {
-+ int cur_size = size_bytes;
-+ int src_x = (src_gpu_addr & 255);
-+ int dst_x = (dst_gpu_addr & 255);
-+ int h = 1;
-+ src_gpu_addr = src_gpu_addr & ~255;
-+ dst_gpu_addr = dst_gpu_addr & ~255;
-+
-+ if (!src_x && !dst_x) {
-+ h = (cur_size / max_bytes);
-+ if (h > 8192)
-+ h = 8192;
-+ if (h == 0)
-+ h = 1;
-+ else
-+ cur_size = max_bytes;
-+ } else {
-+ if (cur_size > max_bytes)
-+ cur_size = max_bytes;
-+ if (cur_size > (max_bytes - dst_x))
-+ cur_size = (max_bytes - dst_x);
-+ if (cur_size > (max_bytes - src_x))
-+ cur_size = (max_bytes - src_x);
-+ }
-+
-+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
-+ WARN_ON(1);
-+ }
-+#if 0
-+ if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
-+ r600_nomm_put_vb(dev);
-+ r600_nomm_get_vb(dev);
-+ if (!rdev->blit_vb)
-+ return;
-+
-+ set_shaders(dev);
-+ vb = r600_nomm_get_vb_ptr(dev);
-+ }
-+#endif
-+
-+ vb[0] = i2f(dst_x / 4);
-+ vb[1] = 0;
-+ vb[2] = i2f(src_x / 4);
-+ vb[3] = 0;
-+
-+ vb[4] = i2f(dst_x / 4);
-+ vb[5] = i2f(h);
-+ vb[6] = i2f(src_x / 4);
-+ vb[7] = i2f(h);
-+
-+ vb[8] = i2f((dst_x + cur_size) / 4);
-+ vb[9] = i2f(h);
-+ vb[10] = i2f((src_x + cur_size) / 4);
-+ vb[11] = i2f(h);
-+
-+ /* src 9 */
-+ set_tex_resource(rdev, FMT_8_8_8_8,
-+ (src_x + cur_size) / 4,
-+ h, (src_x + cur_size) / 4,
-+ src_gpu_addr);
-+ /* 5 */
-+ cp_set_surface_sync(rdev,
-+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
-+
-+ /* dst 23 */
-+ set_render_target(rdev, COLOR_8_8_8_8,
-+ dst_x + cur_size, h,
-+ dst_gpu_addr);
-+
-+ /* scissors 12 */
-+			set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);
-+
-+ /* Vertex buffer setup 14 */
-+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
-+ set_vtx_resource(rdev, vb_gpu_addr);
-+
-+ /* draw 10 */
-+ draw_auto(rdev);
-+
-+ /* 5 */
-+ cp_set_surface_sync(rdev,
-+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
-+ cur_size * h, dst_gpu_addr);
-+
-+ /* 78 ring dwords per loop */
-+ vb += 12;
-+ rdev->r600_blit.vb_used += 12 * 4;
-+
-+ src_gpu_addr += cur_size * h;
-+ dst_gpu_addr += cur_size * h;
-+ size_bytes -= cur_size * h;
-+ }
-+ }
-+}
-+
-diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
-new file mode 100644
-index 0000000..d745e81
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
-@@ -0,0 +1,1072 @@
-+
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+
-+const u32 r6xx_default_state[] =
-+{
-+ 0xc0002400,
-+ 0x00000000,
-+ 0xc0012800,
-+ 0x80000000,
-+ 0x80000000,
-+ 0xc0004600,
-+ 0x00000016,
-+ 0xc0016800,
-+ 0x00000010,
-+ 0x00028000,
-+ 0xc0016800,
-+ 0x00000010,
-+ 0x00008000,
-+ 0xc0016800,
-+ 0x00000542,
-+ 0x07000003,
-+ 0xc0016800,
-+ 0x000005c5,
-+ 0x00000000,
-+ 0xc0016800,
-+ 0x00000363,
-+ 0x00000000,
-+ 0xc0016800,
-+ 0x0000060c,
-+ 0x82000000,
-+ 0xc0016800,
-+ 0x0000060e,
-+ 0x01020204,
-+ 0xc0016f00,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016f00,
-+ 0x00000001,
-+ 0x00000000,
-+ 0xc0096900,
-+ 0x0000022a,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000004,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000000a,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000000b,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000010c,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000010d,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000200,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000343,
-+ 0x00000060,
-+ 0xc0016900,
-+ 0x00000344,
-+ 0x00000040,
-+ 0xc0016900,
-+ 0x00000351,
-+ 0x0000aa00,
-+ 0xc0016900,
-+ 0x00000104,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000010e,
-+ 0x00000000,
-+ 0xc0046900,
-+ 0x00000105,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0036900,
-+ 0x00000109,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0046900,
-+ 0x0000030c,
-+ 0x01000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0046900,
-+ 0x00000048,
-+ 0x3f800000,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x0000008e,
-+ 0x0000000f,
-+ 0xc0016900,
-+ 0x00000080,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000083,
-+ 0x0000ffff,
-+ 0xc0016900,
-+ 0x00000084,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000085,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x00000086,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000087,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x00000088,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000089,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x0000008a,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000008b,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x0000008c,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000094,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x00000095,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000b4,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000096,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x00000097,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000b6,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000098,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x00000099,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000b8,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x0000009a,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x0000009b,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000ba,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x0000009c,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x0000009d,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000bc,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x0000009e,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x0000009f,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000be,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a0,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a1,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c0,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a2,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a3,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c2,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a4,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a5,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c4,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a6,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a7,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c6,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a8,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a9,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c8,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000aa,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000ab,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000ca,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000ac,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000ad,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000cc,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000ae,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000af,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000ce,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000b0,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000b1,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000d0,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000b2,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000b3,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000d2,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000293,
-+ 0x00004010,
-+ 0xc0016900,
-+ 0x00000300,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000301,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000312,
-+ 0xffffffff,
-+ 0xc0016900,
-+ 0x00000307,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000308,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000283,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000292,
-+ 0x00000000,
-+ 0xc0066900,
-+ 0x0000010f,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000206,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000207,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000208,
-+ 0x00000000,
-+ 0xc0046900,
-+ 0x00000303,
-+ 0x3f800000,
-+ 0x3f800000,
-+ 0x3f800000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000205,
-+ 0x00000004,
-+ 0xc0016900,
-+ 0x00000280,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000281,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000037e,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000382,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000380,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000383,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000381,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000282,
-+ 0x00000008,
-+ 0xc0016900,
-+ 0x00000302,
-+ 0x0000002d,
-+ 0xc0016900,
-+ 0x0000037f,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b2,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b6,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b7,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b8,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b9,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000225,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000229,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000237,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000100,
-+ 0x00000800,
-+ 0xc0016900,
-+ 0x00000101,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000102,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a8,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a9,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000103,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000284,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000290,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000285,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000286,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000287,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000288,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000289,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028a,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028b,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028c,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028d,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028e,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028f,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a1,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a5,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002ac,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002ad,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002ae,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002c8,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000206,
-+ 0x00000100,
-+ 0xc0016900,
-+ 0x00000204,
-+ 0x00010000,
-+ 0xc0036e00,
-+ 0x00000000,
-+ 0x00000012,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000008f,
-+ 0x0000000f,
-+ 0xc0016900,
-+ 0x000001e8,
-+ 0x00000001,
-+ 0xc0016900,
-+ 0x00000202,
-+ 0x00cc0000,
-+ 0xc0016900,
-+ 0x00000205,
-+ 0x00000244,
-+ 0xc0016900,
-+ 0x00000203,
-+ 0x00000210,
-+ 0xc0016900,
-+ 0x000001b1,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000185,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b3,
-+ 0x00000001,
-+ 0xc0016900,
-+ 0x000001b4,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000191,
-+ 0x00000b00,
-+ 0xc0016900,
-+ 0x000001b5,
-+ 0x00000000,
-+};
-+
-+const u32 r7xx_default_state[] =
-+{
-+ 0xc0012800,
-+ 0x80000000,
-+ 0x80000000,
-+ 0xc0004600,
-+ 0x00000016,
-+ 0xc0016800,
-+ 0x00000010,
-+ 0x00028000,
-+ 0xc0016800,
-+ 0x00000010,
-+ 0x00008000,
-+ 0xc0016800,
-+ 0x00000542,
-+ 0x07000002,
-+ 0xc0016800,
-+ 0x000005c5,
-+ 0x00000000,
-+ 0xc0016800,
-+ 0x00000363,
-+ 0x00004000,
-+ 0xc0016800,
-+ 0x0000060c,
-+ 0x00000000,
-+ 0xc0016800,
-+ 0x0000060e,
-+ 0x00420204,
-+ 0xc0016f00,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016f00,
-+ 0x00000001,
-+ 0x00000000,
-+ 0xc0096900,
-+ 0x0000022a,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000004,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000000a,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000000b,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000010c,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000010d,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000200,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000343,
-+ 0x00000060,
-+ 0xc0016900,
-+ 0x00000344,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000351,
-+ 0x0000aa00,
-+ 0xc0016900,
-+ 0x00000104,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000010e,
-+ 0x00000000,
-+ 0xc0046900,
-+ 0x00000105,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0046900,
-+ 0x0000030c,
-+ 0x01000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000008e,
-+ 0x0000000f,
-+ 0xc0016900,
-+ 0x00000080,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000083,
-+ 0x0000ffff,
-+ 0xc0016900,
-+ 0x00000084,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000085,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x00000086,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000087,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x00000088,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000089,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x0000008a,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000008b,
-+ 0x20002000,
-+ 0xc0016900,
-+ 0x0000008c,
-+ 0xaaaaaaaa,
-+ 0xc0016900,
-+ 0x00000094,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x00000095,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000b4,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000096,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x00000097,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000b6,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000098,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x00000099,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000b8,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x0000009a,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x0000009b,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000ba,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x0000009c,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x0000009d,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000bc,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x0000009e,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x0000009f,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000be,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a0,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a1,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c0,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a2,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a3,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c2,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a4,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a5,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c4,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a6,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a7,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c6,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000a8,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000a9,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000c8,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000aa,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000ab,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000ca,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000ac,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000ad,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000cc,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000ae,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000af,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000ce,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000b0,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000b1,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000d0,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x000000b2,
-+ 0x80000000,
-+ 0xc0016900,
-+ 0x000000b3,
-+ 0x20002000,
-+ 0xc0026900,
-+ 0x000000d2,
-+ 0x00000000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000293,
-+ 0x00514000,
-+ 0xc0016900,
-+ 0x00000300,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000301,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000312,
-+ 0xffffffff,
-+ 0xc0016900,
-+ 0x00000307,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000308,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000283,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000292,
-+ 0x00000000,
-+ 0xc0066900,
-+ 0x0000010f,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000206,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000207,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000208,
-+ 0x00000000,
-+ 0xc0046900,
-+ 0x00000303,
-+ 0x3f800000,
-+ 0x3f800000,
-+ 0x3f800000,
-+ 0x3f800000,
-+ 0xc0016900,
-+ 0x00000205,
-+ 0x00000004,
-+ 0xc0016900,
-+ 0x00000280,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000281,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000037e,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000382,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000380,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000383,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000381,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000282,
-+ 0x00000008,
-+ 0xc0016900,
-+ 0x00000302,
-+ 0x0000002d,
-+ 0xc0016900,
-+ 0x0000037f,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b2,
-+ 0x00000001,
-+ 0xc0016900,
-+ 0x000001b6,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b7,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b8,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b9,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000225,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000229,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000237,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000100,
-+ 0x00000800,
-+ 0xc0016900,
-+ 0x00000101,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000102,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a8,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a9,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000103,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000284,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000290,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000285,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000286,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000287,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000288,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000289,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028a,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028b,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028c,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028d,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028e,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000028f,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a1,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002a5,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002ac,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002ad,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002ae,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000002c8,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000206,
-+ 0x00000100,
-+ 0xc0016900,
-+ 0x00000204,
-+ 0x00010000,
-+ 0xc0036e00,
-+ 0x00000000,
-+ 0x00000012,
-+ 0x00000000,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x0000008f,
-+ 0x0000000f,
-+ 0xc0016900,
-+ 0x000001e8,
-+ 0x00000001,
-+ 0xc0016900,
-+ 0x00000202,
-+ 0x00cc0000,
-+ 0xc0016900,
-+ 0x00000205,
-+ 0x00000244,
-+ 0xc0016900,
-+ 0x00000203,
-+ 0x00000210,
-+ 0xc0016900,
-+ 0x000001b1,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000185,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x000001b3,
-+ 0x00000001,
-+ 0xc0016900,
-+ 0x000001b4,
-+ 0x00000000,
-+ 0xc0016900,
-+ 0x00000191,
-+ 0x00000b00,
-+ 0xc0016900,
-+ 0x000001b5,
-+ 0x00000000,
-+};
-+
-+/* same for r6xx/r7xx */
-+const u32 r6xx_vs[] =
-+{
-+ 0x00000004,
-+ 0x81000000,
-+ 0x0000203c,
-+ 0x94000b08,
-+ 0x00004000,
-+ 0x14200b1a,
-+ 0x00000000,
-+ 0x00000000,
-+ 0x3c000000,
-+ 0x68cd1000,
-+ 0x00080000,
-+ 0x00000000,
-+};
-+
-+const u32 r6xx_ps[] =
-+{
-+ 0x00000002,
-+ 0x80800000,
-+ 0x00000000,
-+ 0x94200688,
-+ 0x00000010,
-+ 0x000d1000,
-+ 0xb0800000,
-+ 0x00000000,
-+};
-+
-+const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
-+const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
-+const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
-+const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);
-diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
-new file mode 100644
-index 0000000..fdc3b37
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
-@@ -0,0 +1,14 @@
-+
-+#ifndef R600_BLIT_SHADERS_H
-+#define R600_BLIT_SHADERS_H
-+
-+extern const u32 r6xx_ps[];
-+extern const u32 r6xx_vs[];
-+extern const u32 r7xx_default_state[];
-+extern const u32 r6xx_default_state[];
-+
-+
-+extern const u32 r6xx_ps_size, r6xx_vs_size;
-+extern const u32 r6xx_default_size, r7xx_default_size;
-+
-+#endif
-diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
-index 20f1790..6d5a711 100644
---- a/drivers/gpu/drm/radeon/r600_cp.c
-+++ b/drivers/gpu/drm/radeon/r600_cp.c
-@@ -31,7 +31,19 @@
- #include "radeon_drm.h"
- #include "radeon_drv.h"
-
--#include "r600_microcode.h"
-+#define PFP_UCODE_SIZE 576
-+#define PM4_UCODE_SIZE 1792
-+#define R700_PFP_UCODE_SIZE 848
-+#define R700_PM4_UCODE_SIZE 1360
-+
-+/* Firmware Names */
-+/*(DEBLOBBED)*/
-+
-+
-+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
-+ unsigned family, u32 *ib, int *l);
-+void r600_cs_legacy_init(void);
-+
-
- # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
- # define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
-@@ -275,13 +306,122 @@ static void r600_vm_init(struct drm_device *dev)
- }
-
- /* load r600 microcode */
--#define r600_cp_load_microcode(dev_priv) \
-- do { \
-- DRM_ERROR("Missing Free microcode!\n"); \
-- r600_do_cleanup_cp(dev); \
-- return -EINVAL; \
-- } while (0)
--/*(DEBLOBBED)*/
-+static int r600_cp_init_microcode(drm_radeon_private_t *dev_priv)
-+{
-+ struct platform_device *pdev;
-+ const char *chip_name;
-+ size_t pfp_req_size, me_req_size;
-+ char fw_name[30];
-+ int err;
-+
-+ pdev = platform_device_register_simple("r600_cp", 0, NULL, 0);
-+ err = IS_ERR(pdev);
-+ if (err) {
-+ printk(KERN_ERR "r600_cp: Failed to register firmware\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
-+ case CHIP_R600: chip_name = "R600"; break;
-+ case CHIP_RV610: chip_name = "RV610"; break;
-+ case CHIP_RV630: chip_name = "RV630"; break;
-+ case CHIP_RV620: chip_name = "RV620"; break;
-+ case CHIP_RV635: chip_name = "RV635"; break;
-+ case CHIP_RV670: chip_name = "RV670"; break;
-+ case CHIP_RS780:
-+ case CHIP_RS880: chip_name = "RS780"; break;
-+ case CHIP_RV770: chip_name = "RV770"; break;
-+ case CHIP_RV730:
-+ case CHIP_RV740: chip_name = "RV730"; break;
-+ case CHIP_RV710: chip_name = "RV710"; break;
-+ default: BUG();
-+ }
-+
-+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
-+ pfp_req_size = R700_PFP_UCODE_SIZE * 4;
-+ me_req_size = R700_PM4_UCODE_SIZE * 4;
-+ } else {
-+ pfp_req_size = PFP_UCODE_SIZE * 4;
-+ me_req_size = PM4_UCODE_SIZE * 12;
-+ }
-+
-+ DRM_INFO("Loading %s CP Microcode\n", chip_name);
-+
-+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
-+ err = reject_firmware(&dev_priv->pfp_fw, fw_name, &pdev->dev);
-+ if (err)
-+ goto out;
-+ if (dev_priv->pfp_fw->size != pfp_req_size) {
-+ printk(KERN_ERR
-+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
-+ dev_priv->pfp_fw->size, fw_name);
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
-+ err = reject_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
-+ if (err)
-+ goto out;
-+ if (dev_priv->me_fw->size != me_req_size) {
-+ printk(KERN_ERR
-+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
-+ dev_priv->me_fw->size, fw_name);
-+ err = -EINVAL;
-+ }
-+out:
-+ platform_device_unregister(pdev);
-+
-+ if (err) {
-+ if (err != -EINVAL)
-+ printk(KERN_ERR
-+ "r600_cp: Failed to load firmware \"%s\"\n",
-+ fw_name);
-+ release_firmware(dev_priv->pfp_fw);
-+ dev_priv->pfp_fw = NULL;
-+ release_firmware(dev_priv->me_fw);
-+ dev_priv->me_fw = NULL;
-+ }
-+ return err;
-+}
-+
-+static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
-+{
-+ const __be32 *fw_data;
-+ int i;
-+
-+ if (!dev_priv->me_fw || !dev_priv->pfp_fw)
-+ return;
-+
-+ r600_do_cp_stop(dev_priv);
-+
-+ RADEON_WRITE(R600_CP_RB_CNTL,
-+ R600_RB_NO_UPDATE |
-+ R600_RB_BLKSZ(15) |
-+ R600_RB_BUFSZ(3));
-+
-+ RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
-+ RADEON_READ(R600_GRBM_SOFT_RESET);
-+ DRM_UDELAY(15000);
-+ RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
-+
-+ fw_data = (const __be32 *)dev_priv->me_fw->data;
-+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
-+ for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
-+ RADEON_WRITE(R600_CP_ME_RAM_DATA,
-+ be32_to_cpup(fw_data++));
-+
-+ fw_data = (const __be32 *)dev_priv->pfp_fw->data;
-+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
-+ for (i = 0; i < PFP_UCODE_SIZE; i++)
-+ RADEON_WRITE(R600_CP_PFP_UCODE_DATA,
-+ be32_to_cpup(fw_data++));
-+
-+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
-+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
-+ RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
-+
-+}
-
- static void r700_vm_init(struct drm_device *dev)
- {
-@@ -459,13 +475,43 @@ static void r700_vm_init(struct drm_device *dev)
- }
-
- /* load r600 microcode */
--#define r700_cp_load_microcode(dev_priv) \
-- do { \
-- DRM_ERROR("Missing Free microcode!\n"); \
-- r600_do_cleanup_cp(dev); \
-- return -EINVAL; \
-- } while (0)
--/*(DEBLOBBED)*/
-+static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
-+{
-+ const __be32 *fw_data;
-+ int i;
-+
-+ if (!dev_priv->me_fw || !dev_priv->pfp_fw)
-+ return;
-+
-+ r600_do_cp_stop(dev_priv);
-+
-+ RADEON_WRITE(R600_CP_RB_CNTL,
-+ R600_RB_NO_UPDATE |
-+ (15 << 8) |
-+ (3 << 0));
-+
-+ RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
-+ RADEON_READ(R600_GRBM_SOFT_RESET);
-+ DRM_UDELAY(15000);
-+ RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
-+
-+ fw_data = (const __be32 *)dev_priv->pfp_fw->data;
-+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
-+ for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
-+ RADEON_WRITE(R600_CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
-+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
-+
-+ fw_data = (const __be32 *)dev_priv->me_fw->data;
-+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
-+ for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
-+ RADEON_WRITE(R600_CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
-+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
-+
-+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
-+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
-+ RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
-+
-+}
-
- static void r600_test_writeback(drm_radeon_private_t *dev_priv)
- {
-@@ -1874,6 +1863,8 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
-
- DRM_DEBUG("\n");
-
-+ mutex_init(&dev_priv->cs_mutex);
-+ r600_cs_legacy_init();
- /* if we require new memory map but we don't have it fail */
- if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
- DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
-@@ -1905,7 +1896,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
- /* Enable vblank on CRTC1 for older X servers
- */
- dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
--
-+ dev_priv->do_boxes = 0;
- dev_priv->cp_mode = init->cp_mode;
-
- /* We don't support anything other than bus-mastering ring mode,
-@@ -1991,11 +1982,11 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
- } else
- #endif
- {
-- dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
-+ dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
- dev_priv->ring_rptr->handle =
-- (void *)dev_priv->ring_rptr->offset;
-+ (void *)(unsigned long)dev_priv->ring_rptr->offset;
- dev->agp_buffer_map->handle =
-- (void *)dev->agp_buffer_map->offset;
-+ (void *)(unsigned long)dev->agp_buffer_map->offset;
-
- DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
- dev_priv->cp_ring->handle);
-@@ -2147,6 +2138,14 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
- r600_vm_init(dev);
- }
-
-+ if (!dev_priv->me_fw || !dev_priv->pfp_fw) {
-+ int err = r600_cp_init_microcode(dev_priv);
-+ if (err) {
-+ DRM_ERROR("Failed to load firmware!\n");
-+ r600_do_cleanup_cp(dev);
-+ return err;
-+ }
-+ }
- if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
- r700_cp_load_microcode(dev_priv);
- else
-@@ -2291,3 +2290,239 @@ int r600_cp_dispatch_indirect(struct drm_device *dev,
-
- return 0;
- }
-+
-+void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ struct drm_master *master = file_priv->master;
-+ struct drm_radeon_master_private *master_priv = master->driver_priv;
-+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
-+ int nbox = sarea_priv->nbox;
-+ struct drm_clip_rect *pbox = sarea_priv->boxes;
-+ int i, cpp, src_pitch, dst_pitch;
-+ uint64_t src, dst;
-+ RING_LOCALS;
-+ DRM_DEBUG("\n");
-+
-+ if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888)
-+ cpp = 4;
-+ else
-+ cpp = 2;
-+
-+ if (sarea_priv->pfCurrentPage == 0) {
-+ src_pitch = dev_priv->back_pitch;
-+ dst_pitch = dev_priv->front_pitch;
-+ src = dev_priv->back_offset + dev_priv->fb_location;
-+ dst = dev_priv->front_offset + dev_priv->fb_location;
-+ } else {
-+ src_pitch = dev_priv->front_pitch;
-+ dst_pitch = dev_priv->back_pitch;
-+ src = dev_priv->front_offset + dev_priv->fb_location;
-+ dst = dev_priv->back_offset + dev_priv->fb_location;
-+ }
-+
-+ if (r600_prepare_blit_copy(dev, file_priv)) {
-+ DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
-+ return;
-+ }
-+ for (i = 0; i < nbox; i++) {
-+ int x = pbox[i].x1;
-+ int y = pbox[i].y1;
-+ int w = pbox[i].x2 - x;
-+ int h = pbox[i].y2 - y;
-+
-+ DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
-+
-+ r600_blit_swap(dev,
-+ src, dst,
-+ x, y, x, y, w, h,
-+ src_pitch, dst_pitch, cpp);
-+ }
-+ r600_done_blit_copy(dev);
-+
-+ /* Increment the frame counter. The client-side 3D driver must
-+ * throttle the framerate by waiting for this value before
-+ * performing the swapbuffer ioctl.
-+ */
-+ sarea_priv->last_frame++;
-+
-+ BEGIN_RING(3);
-+ R600_FRAME_AGE(sarea_priv->last_frame);
-+ ADVANCE_RING();
-+}
-+
-+int r600_cp_dispatch_texture(struct drm_device *dev,
-+ struct drm_file *file_priv,
-+ drm_radeon_texture_t *tex,
-+ drm_radeon_tex_image_t *image)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+ struct drm_buf *buf;
-+ u32 *buffer;
-+ const u8 __user *data;
-+ int size, pass_size;
-+ u64 src_offset, dst_offset;
-+
-+ if (!radeon_check_offset(dev_priv, tex->offset)) {
-+ DRM_ERROR("Invalid destination offset\n");
-+ return -EINVAL;
-+ }
-+
-+ /* this might fail for zero-sized uploads - are those illegal? */
-+ if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) {
-+ DRM_ERROR("Invalid final destination offset\n");
-+ return -EINVAL;
-+ }
-+
-+ size = tex->height * tex->pitch;
-+
-+ if (size == 0)
-+ return 0;
-+
-+ dst_offset = tex->offset;
-+
-+ if (r600_prepare_blit_copy(dev, file_priv)) {
-+ DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
-+ return -EAGAIN;
-+ }
-+ do {
-+ data = (const u8 __user *)image->data;
-+ pass_size = size;
-+
-+ buf = radeon_freelist_get(dev);
-+ if (!buf) {
-+ DRM_DEBUG("EAGAIN\n");
-+ if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
-+ return -EFAULT;
-+ return -EAGAIN;
-+ }
-+
-+ if (pass_size > buf->total)
-+ pass_size = buf->total;
-+
-+ /* Dispatch the indirect buffer.
-+ */
-+ buffer =
-+ (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
-+
-+ if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
-+ DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
-+ return -EFAULT;
-+ }
-+
-+ buf->file_priv = file_priv;
-+ buf->used = pass_size;
-+ src_offset = dev_priv->gart_buffers_offset + buf->offset;
-+
-+ r600_blit_copy(dev, src_offset, dst_offset, pass_size);
-+
-+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
-+
-+ /* Update the input parameters for next time */
-+ image->data = (const u8 __user *)image->data + pass_size;
-+ dst_offset += pass_size;
-+ size -= pass_size;
-+ } while (size > 0);
-+ r600_done_blit_copy(dev);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Legacy cs ioctl
-+ */
-+static u32 radeon_cs_id_get(struct drm_radeon_private *radeon)
-+{
-+	/* FIXME: check if wrap affects last reported wrap & sequence */
-+ radeon->cs_id_scnt = (radeon->cs_id_scnt + 1) & 0x00FFFFFF;
-+ if (!radeon->cs_id_scnt) {
-+ /* increment wrap counter */
-+ radeon->cs_id_wcnt += 0x01000000;
-+		/* valid sequence counter starts at 1 */
-+ radeon->cs_id_scnt = 1;
-+ }
-+ return (radeon->cs_id_scnt | radeon->cs_id_wcnt);
-+}
-+
-+static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id)
-+{
-+ RING_LOCALS;
-+
-+ *id = radeon_cs_id_get(dev_priv);
-+
-+ /* SCRATCH 2 */
-+ BEGIN_RING(3);
-+ R600_CLEAR_AGE(*id);
-+ ADVANCE_RING();
-+ COMMIT_RING();
-+}
-+
-+static int r600_ib_get(struct drm_device *dev,
-+ struct drm_file *fpriv,
-+ struct drm_buf **buffer)
-+{
-+ struct drm_buf *buf;
-+
-+ *buffer = NULL;
-+ buf = radeon_freelist_get(dev);
-+ if (!buf) {
-+ return -EBUSY;
-+ }
-+ buf->file_priv = fpriv;
-+ *buffer = buf;
-+ return 0;
-+}
-+
-+static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
-+ struct drm_file *fpriv, int l, int r)
-+{
-+ drm_radeon_private_t *dev_priv = dev->dev_private;
-+
-+ if (buf) {
-+ if (!r)
-+ r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
-+ radeon_cp_discard_buffer(dev, fpriv->master, buf);
-+ COMMIT_RING();
-+ }
-+}
-+
-+int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
-+{
-+ struct drm_radeon_private *dev_priv = dev->dev_private;
-+ struct drm_radeon_cs *cs = data;
-+ struct drm_buf *buf;
-+ unsigned family;
-+ int l, r = 0;
-+ u32 *ib, cs_id = 0;
-+
-+ if (dev_priv == NULL) {
-+ DRM_ERROR("called with no initialization\n");
-+ return -EINVAL;
-+ }
-+ family = dev_priv->flags & RADEON_FAMILY_MASK;
-+ if (family < CHIP_R600) {
-+ DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n");
-+ return -EINVAL;
-+ }
-+ mutex_lock(&dev_priv->cs_mutex);
-+ /* get ib */
-+ r = r600_ib_get(dev, fpriv, &buf);
-+ if (r) {
-+ DRM_ERROR("ib_get failed\n");
-+ goto out;
-+ }
-+ ib = dev->agp_buffer_map->handle + buf->offset;
-+ /* now parse command stream */
-+ r = r600_cs_legacy(dev, data, fpriv, family, ib, &l);
-+ if (r) {
-+ goto out;
-+ }
-+
-+out:
-+ r600_ib_free(dev, buf, fpriv, l, r);
-+ /* emit cs id sequence */
-+ r600_cs_id_emit(dev_priv, &cs_id);
-+ cs->cs_id = cs_id;
-+ mutex_unlock(&dev_priv->cs_mutex);
-+ return r;
-+}
-diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
-new file mode 100644
-index 0000000..39bf634
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r600_cs.c
-@@ -0,0 +1,658 @@
-+/*
-+ * Copyright 2008 Advanced Micro Devices, Inc.
-+ * Copyright 2008 Red Hat Inc.
-+ * Copyright 2009 Jerome Glisse.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#include "drmP.h"
-+#include "radeon.h"
-+#include "radeon_share.h"
-+#include "r600d.h"
-+#include "avivod.h"
-+
-+static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
-+ struct radeon_cs_reloc **cs_reloc);
-+static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
-+ struct radeon_cs_reloc **cs_reloc);
-+typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
-+static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
-+
-+/**
-+ * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
-+ * @parser: parser structure holding parsing context.
-+ * @pkt:	where to store packet information
-+ *
-+ * Assume that chunk_ib_index is properly set. Will return -EINVAL
-+ * if the packet is bigger than the remaining ib size or if the packet is unknown.
-+ **/
-+int r600_cs_packet_parse(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt,
-+ unsigned idx)
-+{
-+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-+ uint32_t header;
-+
-+ if (idx >= ib_chunk->length_dw) {
-+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
-+ idx, ib_chunk->length_dw);
-+ return -EINVAL;
-+ }
-+ header = ib_chunk->kdata[idx];
-+ pkt->idx = idx;
-+ pkt->type = CP_PACKET_GET_TYPE(header);
-+ pkt->count = CP_PACKET_GET_COUNT(header);
-+ pkt->one_reg_wr = 0;
-+ switch (pkt->type) {
-+ case PACKET_TYPE0:
-+ pkt->reg = CP_PACKET0_GET_REG(header);
-+ break;
-+ case PACKET_TYPE3:
-+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
-+ break;
-+ case PACKET_TYPE2:
-+ pkt->count = -1;
-+ break;
-+ default:
-+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-+ return -EINVAL;
-+ }
-+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
-+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
-+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+/**
-+ * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
-+ * @parser: parser structure holding parsing context.
-+ * @data: pointer to relocation data
-+ * @offset_start: starting offset
-+ * @offset_mask: offset mask (to align start offset on)
-+ * @reloc:	reloc information
-+ *
-+ * Check that the next packet is a relocation packet3, do bo validation and
-+ * compute the GPU offset using the provided start.
-+ **/
-+static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
-+ struct radeon_cs_reloc **cs_reloc)
-+{
-+ struct radeon_cs_chunk *ib_chunk;
-+ struct radeon_cs_chunk *relocs_chunk;
-+ struct radeon_cs_packet p3reloc;
-+ unsigned idx;
-+ int r;
-+
-+ if (p->chunk_relocs_idx == -1) {
-+ DRM_ERROR("No relocation chunk !\n");
-+ return -EINVAL;
-+ }
-+ *cs_reloc = NULL;
-+ ib_chunk = &p->chunks[p->chunk_ib_idx];
-+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
-+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
-+ if (r) {
-+ return r;
-+ }
-+ p->idx += p3reloc.count + 2;
-+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
-+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
-+ p3reloc.idx);
-+ return -EINVAL;
-+ }
-+ idx = ib_chunk->kdata[p3reloc.idx + 1];
-+ if (idx >= relocs_chunk->length_dw) {
-+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
-+ idx, relocs_chunk->length_dw);
-+ return -EINVAL;
-+ }
-+ /* FIXME: we assume reloc size is 4 dwords */
-+ *cs_reloc = p->relocs_ptr[(idx / 4)];
-+ return 0;
-+}
-+
-+/**
-+ * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
-+ * @parser: parser structure holding parsing context.
-+ * @data: pointer to relocation data
-+ * @offset_start: starting offset
-+ * @offset_mask: offset mask (to align start offset on)
-+ * @reloc:	reloc information
-+ *
-+ * Check that the next packet is a relocation packet3, do bo validation and
-+ * compute the GPU offset using the provided start.
-+ **/
-+static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
-+ struct radeon_cs_reloc **cs_reloc)
-+{
-+ struct radeon_cs_chunk *ib_chunk;
-+ struct radeon_cs_chunk *relocs_chunk;
-+ struct radeon_cs_packet p3reloc;
-+ unsigned idx;
-+ int r;
-+
-+ if (p->chunk_relocs_idx == -1) {
-+ DRM_ERROR("No relocation chunk !\n");
-+ return -EINVAL;
-+ }
-+ *cs_reloc = NULL;
-+ ib_chunk = &p->chunks[p->chunk_ib_idx];
-+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
-+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
-+ if (r) {
-+ return r;
-+ }
-+ p->idx += p3reloc.count + 2;
-+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
-+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
-+ p3reloc.idx);
-+ return -EINVAL;
-+ }
-+ idx = ib_chunk->kdata[p3reloc.idx + 1];
-+ if (idx >= relocs_chunk->length_dw) {
-+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
-+ idx, relocs_chunk->length_dw);
-+ return -EINVAL;
-+ }
-+ *cs_reloc = &p->relocs[0];
-+ (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
-+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
-+ return 0;
-+}
-+
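The nomm path above rebuilds the 64-bit GPU offset from two dwords of the relocation entry: dword 0 carries the low 32 bits and dword 3 the high 32 bits (each entry is assumed to span 4 dwords, as the FIXME in the mm variant notes). A minimal stand-alone sketch of that assembly, not part of the patch and using made-up example values:

/* Illustrative sketch, not part of the patch: how the 64-bit GPU offset is
 * assembled from a 4-dword relocation entry, mirroring
 * r600_cs_packet_next_reloc_nomm() above. Example values are made up. */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* dword 0 = low 32 bits of the offset, dword 3 = high 32 bits */
	uint32_t reloc_entry[4] = { 0x12345000u, 0u, 0u, 0x1u };
	uint64_t gpu_offset;

	gpu_offset = (uint64_t)reloc_entry[3] << 32;
	gpu_offset |= reloc_entry[0];

	assert(gpu_offset == 0x112345000ULL);
	return 0;
}
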
-+static int r600_packet0_check(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt,
-+ unsigned idx, unsigned reg)
-+{
-+ switch (reg) {
-+ case AVIVO_D1MODE_VLINE_START_END:
-+ case AVIVO_D2MODE_VLINE_START_END:
-+ break;
-+ default:
-+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
-+ reg, idx);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt)
-+{
-+ unsigned reg, i;
-+ unsigned idx;
-+ int r;
-+
-+ idx = pkt->idx + 1;
-+ reg = pkt->reg;
-+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
-+ r = r600_packet0_check(p, pkt, idx, reg);
-+ if (r) {
-+ return r;
-+ }
-+ }
-+ return 0;
-+}
-+
-+static int r600_packet3_check(struct radeon_cs_parser *p,
-+ struct radeon_cs_packet *pkt)
-+{
-+ struct radeon_cs_chunk *ib_chunk;
-+ struct radeon_cs_reloc *reloc;
-+ volatile u32 *ib;
-+ unsigned idx;
-+ unsigned i;
-+ unsigned start_reg, end_reg, reg;
-+ int r;
-+
-+ ib = p->ib->ptr;
-+ ib_chunk = &p->chunks[p->chunk_ib_idx];
-+ idx = pkt->idx + 1;
-+ switch (pkt->opcode) {
-+ case PACKET3_START_3D_CMDBUF:
-+ if (p->family >= CHIP_RV770 || pkt->count) {
-+ DRM_ERROR("bad START_3D\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_CONTEXT_CONTROL:
-+ if (pkt->count != 1) {
-+ DRM_ERROR("bad CONTEXT_CONTROL\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_INDEX_TYPE:
-+ case PACKET3_NUM_INSTANCES:
-+ if (pkt->count) {
-+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_DRAW_INDEX:
-+ if (pkt->count != 3) {
-+ DRM_ERROR("bad DRAW_INDEX\n");
-+ return -EINVAL;
-+ }
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad DRAW_INDEX\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-+ ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
-+ break;
-+ case PACKET3_DRAW_INDEX_AUTO:
-+ if (pkt->count != 1) {
-+ DRM_ERROR("bad DRAW_INDEX_AUTO\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_DRAW_INDEX_IMMD_BE:
-+ case PACKET3_DRAW_INDEX_IMMD:
-+ if (pkt->count < 2) {
-+ DRM_ERROR("bad DRAW_INDEX_IMMD\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_WAIT_REG_MEM:
-+ if (pkt->count != 5) {
-+ DRM_ERROR("bad WAIT_REG_MEM\n");
-+ return -EINVAL;
-+ }
-+ /* bit 4 is reg (0) or mem (1) */
-+ if (ib_chunk->kdata[idx+0] & 0x10) {
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad WAIT_REG_MEM\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-+ ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
-+ }
-+ break;
-+ case PACKET3_SURFACE_SYNC:
-+ if (pkt->count != 3) {
-+ DRM_ERROR("bad SURFACE_SYNC\n");
-+ return -EINVAL;
-+ }
-+ /* 0xffffffff/0x0 is flush all cache flag */
-+ if (ib_chunk->kdata[idx+1] != 0xffffffff ||
-+ ib_chunk->kdata[idx+2] != 0) {
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad SURFACE_SYNC\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-+ }
-+ break;
-+ case PACKET3_EVENT_WRITE:
-+ if (pkt->count != 2 && pkt->count != 0) {
-+ DRM_ERROR("bad EVENT_WRITE\n");
-+ return -EINVAL;
-+ }
-+ if (pkt->count) {
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad EVENT_WRITE\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-+ ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
-+ }
-+ break;
-+ case PACKET3_EVENT_WRITE_EOP:
-+ if (pkt->count != 4) {
-+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
-+ return -EINVAL;
-+ }
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad EVENT_WRITE\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-+ ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
-+ break;
-+ case PACKET3_SET_CONFIG_REG:
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
-+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
-+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
-+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
-+ return -EINVAL;
-+ }
-+ for (i = 0; i < pkt->count; i++) {
-+ reg = start_reg + (4 * i);
-+ switch (reg) {
-+ case CP_COHER_BASE:
-+ /* use PACKET3_SURFACE_SYNC */
-+ return -EINVAL;
-+ default:
-+ break;
-+ }
-+ }
-+ break;
-+ case PACKET3_SET_CONTEXT_REG:
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
-+ (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
-+ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
-+ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
-+ return -EINVAL;
-+ }
-+ for (i = 0; i < pkt->count; i++) {
-+ reg = start_reg + (4 * i);
-+ switch (reg) {
-+ case DB_DEPTH_BASE:
-+ case CB_COLOR0_BASE:
-+ case CB_COLOR1_BASE:
-+ case CB_COLOR2_BASE:
-+ case CB_COLOR3_BASE:
-+ case CB_COLOR4_BASE:
-+ case CB_COLOR5_BASE:
-+ case CB_COLOR6_BASE:
-+ case CB_COLOR7_BASE:
-+ case SQ_PGM_START_FS:
-+ case SQ_PGM_START_ES:
-+ case SQ_PGM_START_VS:
-+ case SQ_PGM_START_GS:
-+ case SQ_PGM_START_PS:
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad SET_CONTEXT_REG "
-+ "0x%04X\n", reg);
-+ return -EINVAL;
-+ }
-+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-+ break;
-+ case VGT_DMA_BASE:
-+ case VGT_DMA_BASE_HI:
-+ /* These should be handled by DRAW_INDEX packet 3 */
-+ case VGT_STRMOUT_BASE_OFFSET_0:
-+ case VGT_STRMOUT_BASE_OFFSET_1:
-+ case VGT_STRMOUT_BASE_OFFSET_2:
-+ case VGT_STRMOUT_BASE_OFFSET_3:
-+ case VGT_STRMOUT_BASE_OFFSET_HI_0:
-+ case VGT_STRMOUT_BASE_OFFSET_HI_1:
-+ case VGT_STRMOUT_BASE_OFFSET_HI_2:
-+ case VGT_STRMOUT_BASE_OFFSET_HI_3:
-+ case VGT_STRMOUT_BUFFER_BASE_0:
-+ case VGT_STRMOUT_BUFFER_BASE_1:
-+ case VGT_STRMOUT_BUFFER_BASE_2:
-+ case VGT_STRMOUT_BUFFER_BASE_3:
-+ case VGT_STRMOUT_BUFFER_OFFSET_0:
-+ case VGT_STRMOUT_BUFFER_OFFSET_1:
-+ case VGT_STRMOUT_BUFFER_OFFSET_2:
-+ case VGT_STRMOUT_BUFFER_OFFSET_3:
-+ /* These should be handled by STRMOUT_BUFFER packet 3 */
-+ DRM_ERROR("bad context reg: 0x%08x\n", reg);
-+ return -EINVAL;
-+ default:
-+ break;
-+ }
-+ }
-+ break;
-+ case PACKET3_SET_RESOURCE:
-+ if (pkt->count % 7) {
-+ DRM_ERROR("bad SET_RESOURCE\n");
-+ return -EINVAL;
-+ }
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
-+ (start_reg >= PACKET3_SET_RESOURCE_END) ||
-+ (end_reg >= PACKET3_SET_RESOURCE_END)) {
-+ DRM_ERROR("bad SET_RESOURCE\n");
-+ return -EINVAL;
-+ }
-+ for (i = 0; i < (pkt->count / 7); i++) {
-+ switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) {
-+ case SQ_TEX_VTX_VALID_TEXTURE:
-+ /* tex base */
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad SET_RESOURCE\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-+ /* tex mip base */
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad SET_RESOURCE\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-+ break;
-+ case SQ_TEX_VTX_VALID_BUFFER:
-+ /* vtx base */
-+ r = r600_cs_packet_next_reloc(p, &reloc);
-+ if (r) {
-+ DRM_ERROR("bad SET_RESOURCE\n");
-+ return -EINVAL;
-+ }
-+ ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
-+ ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
-+ break;
-+ case SQ_TEX_VTX_INVALID_TEXTURE:
-+ case SQ_TEX_VTX_INVALID_BUFFER:
-+ default:
-+ DRM_ERROR("bad SET_RESOURCE\n");
-+ return -EINVAL;
-+ }
-+ }
-+ break;
-+ case PACKET3_SET_ALU_CONST:
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
-+ (start_reg >= PACKET3_SET_ALU_CONST_END) ||
-+ (end_reg >= PACKET3_SET_ALU_CONST_END)) {
-+ DRM_ERROR("bad SET_ALU_CONST\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_SET_BOOL_CONST:
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
-+ (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
-+ (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
-+ DRM_ERROR("bad SET_BOOL_CONST\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_SET_LOOP_CONST:
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
-+ (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
-+ (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
-+ DRM_ERROR("bad SET_LOOP_CONST\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_SET_CTL_CONST:
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
-+ (start_reg >= PACKET3_SET_CTL_CONST_END) ||
-+ (end_reg >= PACKET3_SET_CTL_CONST_END)) {
-+ DRM_ERROR("bad SET_CTL_CONST\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_SET_SAMPLER:
-+ if (pkt->count % 3) {
-+ DRM_ERROR("bad SET_SAMPLER\n");
-+ return -EINVAL;
-+ }
-+ start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET;
-+ end_reg = 4 * pkt->count + start_reg - 4;
-+ if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
-+ (start_reg >= PACKET3_SET_SAMPLER_END) ||
-+ (end_reg >= PACKET3_SET_SAMPLER_END)) {
-+ DRM_ERROR("bad SET_SAMPLER\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_SURFACE_BASE_UPDATE:
-+ if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
-+ DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
-+ return -EINVAL;
-+ }
-+ if (pkt->count) {
-+ DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PACKET3_NOP:
-+ break;
-+ default:
-+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
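As a worked example of the register-range checks in r600_packet3_check() above (example values only, not part of the patch): a SET_CONFIG_REG packet whose first data dword is 0x100 and whose count is 4 gives start_reg = (0x100 << 2) + PACKET3_SET_CONFIG_REG_OFFSET = 0x400 + 0x8000 = 0x8400 and end_reg = 4*4 + 0x8400 - 4 = 0x840C. Both values fall inside [0x8000, 0xac00), so the packet is accepted, and the per-register loop then inspects 0x8400, 0x8404, 0x8408 and 0x840C.
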
-+int r600_cs_parse(struct radeon_cs_parser *p)
-+{
-+ struct radeon_cs_packet pkt;
-+ int r;
-+
-+ do {
-+ r = r600_cs_packet_parse(p, &pkt, p->idx);
-+ if (r) {
-+ return r;
-+ }
-+ p->idx += pkt.count + 2;
-+ switch (pkt.type) {
-+ case PACKET_TYPE0:
-+ r = r600_cs_parse_packet0(p, &pkt);
-+ break;
-+ case PACKET_TYPE2:
-+ break;
-+ case PACKET_TYPE3:
-+ r = r600_packet3_check(p, &pkt);
-+ break;
-+ default:
-+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
-+ return -EINVAL;
-+ }
-+ if (r) {
-+ return r;
-+ }
-+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
-+#if 0
-+ for (r = 0; r < p->ib->length_dw; r++) {
-+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
-+ mdelay(1);
-+ }
-+#endif
-+ return 0;
-+}
-+
-+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
-+{
-+ if (p->chunk_relocs_idx == -1) {
-+ return 0;
-+ }
-+ p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
-+ if (p->relocs == NULL) {
-+ return -ENOMEM;
-+ }
-+ return 0;
-+}
-+
-+/**
-+ * r600_cs_parser_fini() - clean parser state
-+ * @parser: parser structure holding parsing context.
-+ * @error: error number
-+ *
-+ * If error is set, unvalidate buffers; otherwise just free the memory
-+ * used by the parsing context.
-+ **/
-+static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
-+{
-+ unsigned i;
-+
-+ kfree(parser->relocs);
-+ for (i = 0; i < parser->nchunks; i++) {
-+ kfree(parser->chunks[i].kdata);
-+ }
-+ kfree(parser->chunks);
-+ kfree(parser->chunks_array);
-+}
-+
-+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
-+ unsigned family, u32 *ib, int *l)
-+{
-+ struct radeon_cs_parser parser;
-+ struct radeon_cs_chunk *ib_chunk;
-+ struct radeon_ib fake_ib;
-+ int r;
-+
-+ /* initialize parser */
-+ memset(&parser, 0, sizeof(struct radeon_cs_parser));
-+ parser.filp = filp;
-+ parser.rdev = NULL;
-+ parser.family = family;
-+ parser.ib = &fake_ib;
-+ fake_ib.ptr = ib;
-+ r = radeon_cs_parser_init(&parser, data);
-+ if (r) {
-+ DRM_ERROR("Failed to initialize parser !\n");
-+ r600_cs_parser_fini(&parser, r);
-+ return r;
-+ }
-+ r = r600_cs_parser_relocs_legacy(&parser);
-+ if (r) {
-+ DRM_ERROR("Failed to parse relocation !\n");
-+ r600_cs_parser_fini(&parser, r);
-+ return r;
-+ }
-+ /* Copy the packet into the IB; the parser will read from the
-+ * input memory (cached) and write to the IB (which can be
-+ * uncached). */
-+ ib_chunk = &parser.chunks[parser.chunk_ib_idx];
-+ parser.ib->length_dw = ib_chunk->length_dw;
-+ memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
-+ *l = parser.ib->length_dw;
-+ r = r600_cs_parse(&parser);
-+ if (r) {
-+ DRM_ERROR("Invalid command stream !\n");
-+ r600_cs_parser_fini(&parser, r);
-+ return r;
-+ }
-+ r600_cs_parser_fini(&parser, r);
-+ return r;
-+}
-+
-+void r600_cs_legacy_init(void)
-+{
-+ r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
-+}
-diff --git a/drivers/gpu/drm/radeon/r600_microcode.h b/drivers/gpu/drm/radeon/r600_microcode.h
-deleted file mode 100644
-index 778c8b4..0000000
---- a/drivers/gpu/drm/radeon/r600_microcode.h
-+++ /dev/null
-@@ -1,39 +0,0 @@
--/*
-- * Copyright 2008-2009 Advanced Micro Devices, Inc.
-- * All Rights Reserved.
-- *
-- * Permission is hereby granted, free of charge, to any person obtaining a
-- * copy of this software and associated documentation files (the "Software"),
-- * to deal in the Software without restriction, including without limitation
-- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- * and/or sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following conditions:
-- *
-- * The above copyright notice and this permission notice (including the next
-- * paragraph) shall be included in all copies or substantial portions of the
-- * Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-- *
-- */
--
--#ifndef R600_MICROCODE_H
--#define R600_MICROCODE_H
--
--static const int ME_JUMP_TABLE_START = 1764;
--static const int ME_JUMP_TABLE_END = 1792;
--
--#define PFP_UCODE_SIZE 576
--#define PM4_UCODE_SIZE 1792
--#define R700_PFP_UCODE_SIZE 848
--#define R700_PM4_UCODE_SIZE 1360
--
--/*(DEBLOBBED)*/
--
--#endif
-diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
-new file mode 100644
-index 0000000..723295f
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/r600d.h
-@@ -0,0 +1,661 @@
-+/*
-+ * Copyright 2009 Advanced Micro Devices, Inc.
-+ * Copyright 2009 Red Hat Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#ifndef R600D_H
-+#define R600D_H
-+
-+#define CP_PACKET2 0x80000000
-+#define PACKET2_PAD_SHIFT 0
-+#define PACKET2_PAD_MASK (0x3fffffff << 0)
-+
-+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-+
-+#define R6XX_MAX_SH_GPRS 256
-+#define R6XX_MAX_TEMP_GPRS 16
-+#define R6XX_MAX_SH_THREADS 256
-+#define R6XX_MAX_SH_STACK_ENTRIES 4096
-+#define R6XX_MAX_BACKENDS 8
-+#define R6XX_MAX_BACKENDS_MASK 0xff
-+#define R6XX_MAX_SIMDS 8
-+#define R6XX_MAX_SIMDS_MASK 0xff
-+#define R6XX_MAX_PIPES 8
-+#define R6XX_MAX_PIPES_MASK 0xff
-+
-+/* PTE flags */
-+#define PTE_VALID (1 << 0)
-+#define PTE_SYSTEM (1 << 1)
-+#define PTE_SNOOPED (1 << 2)
-+#define PTE_READABLE (1 << 5)
-+#define PTE_WRITEABLE (1 << 6)
-+
-+/* Registers */
-+#define ARB_POP 0x2418
-+#define ENABLE_TC128 (1 << 30)
-+#define ARB_GDEC_RD_CNTL 0x246C
-+
-+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
-+#define CC_RB_BACKEND_DISABLE 0x98F4
-+#define BACKEND_DISABLE(x) ((x) << 16)
-+
-+#define CB_COLOR0_BASE 0x28040
-+#define CB_COLOR1_BASE 0x28044
-+#define CB_COLOR2_BASE 0x28048
-+#define CB_COLOR3_BASE 0x2804C
-+#define CB_COLOR4_BASE 0x28050
-+#define CB_COLOR5_BASE 0x28054
-+#define CB_COLOR6_BASE 0x28058
-+#define CB_COLOR7_BASE 0x2805C
-+#define CB_COLOR7_FRAG 0x280FC
-+
-+#define CB_COLOR0_SIZE 0x28060
-+#define CB_COLOR0_VIEW 0x28080
-+#define CB_COLOR0_INFO 0x280a0
-+#define CB_COLOR0_TILE 0x280c0
-+#define CB_COLOR0_FRAG 0x280e0
-+#define CB_COLOR0_MASK 0x28100
-+
-+#define CONFIG_MEMSIZE 0x5428
-+#define CP_STAT 0x8680
-+#define CP_COHER_BASE 0x85F8
-+#define CP_DEBUG 0xC1FC
-+#define R_0086D8_CP_ME_CNTL 0x86D8
-+#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
-+#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
-+#define CP_ME_RAM_DATA 0xC160
-+#define CP_ME_RAM_RADDR 0xC158
-+#define CP_ME_RAM_WADDR 0xC15C
-+#define CP_MEQ_THRESHOLDS 0x8764
-+#define MEQ_END(x) ((x) << 16)
-+#define ROQ_END(x) ((x) << 24)
-+#define CP_PERFMON_CNTL 0x87FC
-+#define CP_PFP_UCODE_ADDR 0xC150
-+#define CP_PFP_UCODE_DATA 0xC154
-+#define CP_QUEUE_THRESHOLDS 0x8760
-+#define ROQ_IB1_START(x) ((x) << 0)
-+#define ROQ_IB2_START(x) ((x) << 8)
-+#define CP_RB_BASE 0xC100
-+#define CP_RB_CNTL 0xC104
-+#define RB_BUFSZ(x) ((x)<<0)
-+#define RB_BLKSZ(x) ((x)<<8)
-+#define RB_NO_UPDATE (1<<27)
-+#define RB_RPTR_WR_ENA (1<<31)
-+#define BUF_SWAP_32BIT (2 << 16)
-+#define CP_RB_RPTR 0x8700
-+#define CP_RB_RPTR_ADDR 0xC10C
-+#define CP_RB_RPTR_ADDR_HI 0xC110
-+#define CP_RB_RPTR_WR 0xC108
-+#define CP_RB_WPTR 0xC114
-+#define CP_RB_WPTR_ADDR 0xC118
-+#define CP_RB_WPTR_ADDR_HI 0xC11C
-+#define CP_RB_WPTR_DELAY 0x8704
-+#define CP_ROQ_IB1_STAT 0x8784
-+#define CP_ROQ_IB2_STAT 0x8788
-+#define CP_SEM_WAIT_TIMER 0x85BC
-+
-+#define DB_DEBUG 0x9830
-+#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
-+#define DB_DEPTH_BASE 0x2800C
-+#define DB_WATERMARKS 0x9838
-+#define DEPTH_FREE(x) ((x) << 0)
-+#define DEPTH_FLUSH(x) ((x) << 5)
-+#define DEPTH_PENDING_FREE(x) ((x) << 15)
-+#define DEPTH_CACHELINE_FREE(x) ((x) << 20)
-+
-+#define DCP_TILING_CONFIG 0x6CA0
-+#define PIPE_TILING(x) ((x) << 1)
-+#define BANK_TILING(x) ((x) << 4)
-+#define GROUP_SIZE(x) ((x) << 6)
-+#define ROW_TILING(x) ((x) << 8)
-+#define BANK_SWAPS(x) ((x) << 11)
-+#define SAMPLE_SPLIT(x) ((x) << 14)
-+#define BACKEND_MAP(x) ((x) << 16)
-+
-+#define GB_TILING_CONFIG 0x98F0
-+
-+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
-+#define INACTIVE_QD_PIPES(x) ((x) << 8)
-+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
-+#define INACTIVE_SIMDS(x) ((x) << 16)
-+#define INACTIVE_SIMDS_MASK 0x00FF0000
-+
-+#define SQ_CONFIG 0x8c00
-+# define VC_ENABLE (1 << 0)
-+# define EXPORT_SRC_C (1 << 1)
-+# define DX9_CONSTS (1 << 2)
-+# define ALU_INST_PREFER_VECTOR (1 << 3)
-+# define DX10_CLAMP (1 << 4)
-+# define CLAUSE_SEQ_PRIO(x) ((x) << 8)
-+# define PS_PRIO(x) ((x) << 24)
-+# define VS_PRIO(x) ((x) << 26)
-+# define GS_PRIO(x) ((x) << 28)
-+# define ES_PRIO(x) ((x) << 30)
-+#define SQ_GPR_RESOURCE_MGMT_1 0x8c04
-+# define NUM_PS_GPRS(x) ((x) << 0)
-+# define NUM_VS_GPRS(x) ((x) << 16)
-+# define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
-+#define SQ_GPR_RESOURCE_MGMT_2 0x8c08
-+# define NUM_GS_GPRS(x) ((x) << 0)
-+# define NUM_ES_GPRS(x) ((x) << 16)
-+#define SQ_THREAD_RESOURCE_MGMT 0x8c0c
-+# define NUM_PS_THREADS(x) ((x) << 0)
-+# define NUM_VS_THREADS(x) ((x) << 8)
-+# define NUM_GS_THREADS(x) ((x) << 16)
-+# define NUM_ES_THREADS(x) ((x) << 24)
-+#define SQ_STACK_RESOURCE_MGMT_1 0x8c10
-+# define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
-+# define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
-+#define SQ_STACK_RESOURCE_MGMT_2 0x8c14
-+# define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
-+# define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
-+
-+#define GRBM_CNTL 0x8000
-+# define GRBM_READ_TIMEOUT(x) ((x) << 0)
-+#define GRBM_STATUS 0x8010
-+#define CMDFIFO_AVAIL_MASK 0x0000001F
-+#define GUI_ACTIVE (1<<31)
-+#define GRBM_STATUS2 0x8014
-+#define GRBM_SOFT_RESET 0x8020
-+#define SOFT_RESET_CP (1<<0)
-+
-+#define HDP_HOST_PATH_CNTL 0x2C00
-+#define HDP_NONSURFACE_BASE 0x2C04
-+#define HDP_NONSURFACE_INFO 0x2C08
-+#define HDP_NONSURFACE_SIZE 0x2C0C
-+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
-+#define HDP_TILING_CONFIG 0x2F3C
-+
-+#define MC_VM_AGP_TOP 0x2184
-+#define MC_VM_AGP_BOT 0x2188
-+#define MC_VM_AGP_BASE 0x218C
-+#define MC_VM_FB_LOCATION 0x2180
-+#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
-+#define ENABLE_L1_TLB (1 << 0)
-+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
-+#define ENABLE_L1_STRICT_ORDERING (1 << 2)
-+#define SYSTEM_ACCESS_MODE_MASK 0x000000C0
-+#define SYSTEM_ACCESS_MODE_SHIFT 6
-+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6)
-+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6)
-+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 6)
-+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6)
-+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8)
-+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8)
-+#define ENABLE_SEMAPHORE_MODE (1 << 10)
-+#define ENABLE_WAIT_L2_QUERY (1 << 11)
-+#define EFFECTIVE_L1_TLB_SIZE(x) (((x) & 7) << 12)
-+#define EFFECTIVE_L1_TLB_SIZE_MASK 0x00007000
-+#define EFFECTIVE_L1_TLB_SIZE_SHIFT 12
-+#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15)
-+#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000
-+#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15
-+#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0
-+#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC
-+#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204
-+#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208
-+#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C
-+#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200
-+#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4
-+#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8
-+#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210
-+#define MC_VM_L1_TLB_MCB_WR_HDP_CNTL 0x2218
-+#define MC_VM_L1_TLB_MCB_WR_PDMA_CNTL 0x221C
-+#define MC_VM_L1_TLB_MCB_WR_SEM_CNTL 0x2220
-+#define MC_VM_L1_TLB_MCB_WR_SYS_CNTL 0x2214
-+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
-+#define LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
-+#define LOGICAL_PAGE_NUMBER_SHIFT 0
-+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
-+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
-+
-+#define PA_CL_ENHANCE 0x8A14
-+#define CLIP_VTX_REORDER_ENA (1 << 0)
-+#define NUM_CLIP_SEQ(x) ((x) << 1)
-+#define PA_SC_AA_CONFIG 0x28C04
-+#define PA_SC_AA_SAMPLE_LOCS_2S 0x8B40
-+#define PA_SC_AA_SAMPLE_LOCS_4S 0x8B44
-+#define PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8B48
-+#define PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8B4C
-+#define S0_X(x) ((x) << 0)
-+#define S0_Y(x) ((x) << 4)
-+#define S1_X(x) ((x) << 8)
-+#define S1_Y(x) ((x) << 12)
-+#define S2_X(x) ((x) << 16)
-+#define S2_Y(x) ((x) << 20)
-+#define S3_X(x) ((x) << 24)
-+#define S3_Y(x) ((x) << 28)
-+#define S4_X(x) ((x) << 0)
-+#define S4_Y(x) ((x) << 4)
-+#define S5_X(x) ((x) << 8)
-+#define S5_Y(x) ((x) << 12)
-+#define S6_X(x) ((x) << 16)
-+#define S6_Y(x) ((x) << 20)
-+#define S7_X(x) ((x) << 24)
-+#define S7_Y(x) ((x) << 28)
-+#define PA_SC_CLIPRECT_RULE 0x2820c
-+#define PA_SC_ENHANCE 0x8BF0
-+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
-+#define FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12)
-+#define PA_SC_LINE_STIPPLE 0x28A0C
-+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
-+#define PA_SC_MODE_CNTL 0x28A4C
-+#define PA_SC_MULTI_CHIP_CNTL 0x8B20
-+
-+#define PA_SC_SCREEN_SCISSOR_TL 0x28030
-+#define PA_SC_GENERIC_SCISSOR_TL 0x28240
-+#define PA_SC_WINDOW_SCISSOR_TL 0x28204
-+
-+#define PCIE_PORT_INDEX 0x0038
-+#define PCIE_PORT_DATA 0x003C
-+
-+#define RAMCFG 0x2408
-+#define NOOFBANK_SHIFT 0
-+#define NOOFBANK_MASK 0x00000001
-+#define NOOFRANK_SHIFT 1
-+#define NOOFRANK_MASK 0x00000002
-+#define NOOFROWS_SHIFT 2
-+#define NOOFROWS_MASK 0x0000001C
-+#define NOOFCOLS_SHIFT 5
-+#define NOOFCOLS_MASK 0x00000060
-+#define CHANSIZE_SHIFT 7
-+#define CHANSIZE_MASK 0x00000080
-+#define BURSTLENGTH_SHIFT 8
-+#define BURSTLENGTH_MASK 0x00000100
-+#define CHANSIZE_OVERRIDE (1 << 10)
-+
-+#define SCRATCH_REG0 0x8500
-+#define SCRATCH_REG1 0x8504
-+#define SCRATCH_REG2 0x8508
-+#define SCRATCH_REG3 0x850C
-+#define SCRATCH_REG4 0x8510
-+#define SCRATCH_REG5 0x8514
-+#define SCRATCH_REG6 0x8518
-+#define SCRATCH_REG7 0x851C
-+#define SCRATCH_UMSK 0x8540
-+#define SCRATCH_ADDR 0x8544
-+
-+#define SPI_CONFIG_CNTL 0x9100
-+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
-+#define DISABLE_INTERP_1 (1 << 5)
-+#define SPI_CONFIG_CNTL_1 0x913C
-+#define VTX_DONE_DELAY(x) ((x) << 0)
-+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-+#define SPI_INPUT_Z 0x286D8
-+#define SPI_PS_IN_CONTROL_0 0x286CC
-+#define NUM_INTERP(x) ((x)<<0)
-+#define POSITION_ENA (1<<8)
-+#define POSITION_CENTROID (1<<9)
-+#define POSITION_ADDR(x) ((x)<<10)
-+#define PARAM_GEN(x) ((x)<<15)
-+#define PARAM_GEN_ADDR(x) ((x)<<19)
-+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
-+#define PERSP_GRADIENT_ENA (1<<28)
-+#define LINEAR_GRADIENT_ENA (1<<29)
-+#define POSITION_SAMPLE (1<<30)
-+#define BARYC_AT_SAMPLE_ENA (1<<31)
-+#define SPI_PS_IN_CONTROL_1 0x286D0
-+#define GEN_INDEX_PIX (1<<0)
-+#define GEN_INDEX_PIX_ADDR(x) ((x)<<1)
-+#define FRONT_FACE_ENA (1<<8)
-+#define FRONT_FACE_CHAN(x) ((x)<<9)
-+#define FRONT_FACE_ALL_BITS (1<<11)
-+#define FRONT_FACE_ADDR(x) ((x)<<12)
-+#define FOG_ADDR(x) ((x)<<17)
-+#define FIXED_PT_POSITION_ENA (1<<24)
-+#define FIXED_PT_POSITION_ADDR(x) ((x)<<25)
-+
-+#define SQ_MS_FIFO_SIZES 0x8CF0
-+#define CACHE_FIFO_SIZE(x) ((x) << 0)
-+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
-+#define DONE_FIFO_HIWATER(x) ((x) << 16)
-+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
-+#define SQ_PGM_START_ES 0x28880
-+#define SQ_PGM_START_FS 0x28894
-+#define SQ_PGM_START_GS 0x2886C
-+#define SQ_PGM_START_PS 0x28840
-+#define SQ_PGM_RESOURCES_PS 0x28850
-+#define SQ_PGM_EXPORTS_PS 0x28854
-+#define SQ_PGM_CF_OFFSET_PS 0x288cc
-+#define SQ_PGM_START_VS 0x28858
-+#define SQ_PGM_RESOURCES_VS 0x28868
-+#define SQ_PGM_CF_OFFSET_VS 0x288d0
-+#define SQ_VTX_CONSTANT_WORD6_0 0x38018
-+#define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30)
-+#define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3)
-+#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
-+#define SQ_TEX_VTX_INVALID_BUFFER 0x1
-+#define SQ_TEX_VTX_VALID_TEXTURE 0x2
-+#define SQ_TEX_VTX_VALID_BUFFER 0x3
-+
-+
-+#define SX_MISC 0x28350
-+#define SX_DEBUG_1 0x9054
-+#define SMX_EVENT_RELEASE (1 << 0)
-+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
-+
-+#define TA_CNTL_AUX 0x9508
-+#define DISABLE_CUBE_WRAP (1 << 0)
-+#define DISABLE_CUBE_ANISO (1 << 1)
-+#define SYNC_GRADIENT (1 << 24)
-+#define SYNC_WALKER (1 << 25)
-+#define SYNC_ALIGNER (1 << 26)
-+#define BILINEAR_PRECISION_6_BIT (0 << 31)
-+#define BILINEAR_PRECISION_8_BIT (1 << 31)
-+
-+#define TC_CNTL 0x9608
-+#define TC_L2_SIZE(x) ((x)<<5)
-+#define L2_DISABLE_LATE_HIT (1<<9)
-+
-+
-+#define VGT_CACHE_INVALIDATION 0x88C4
-+#define CACHE_INVALIDATION(x) ((x)<<0)
-+#define VC_ONLY 0
-+#define TC_ONLY 1
-+#define VC_AND_TC 2
-+#define VGT_DMA_BASE 0x287E8
-+#define VGT_DMA_BASE_HI 0x287E4
-+#define VGT_ES_PER_GS 0x88CC
-+#define VGT_GS_PER_ES 0x88C8
-+#define VGT_GS_PER_VS 0x88E8
-+#define VGT_GS_VERTEX_REUSE 0x88D4
-+#define VGT_PRIMITIVE_TYPE 0x8958
-+#define VGT_NUM_INSTANCES 0x8974
-+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
-+#define DEALLOC_DIST_MASK 0x0000007F
-+#define VGT_STRMOUT_BASE_OFFSET_0 0x28B10
-+#define VGT_STRMOUT_BASE_OFFSET_1 0x28B14
-+#define VGT_STRMOUT_BASE_OFFSET_2 0x28B18
-+#define VGT_STRMOUT_BASE_OFFSET_3 0x28B1c
-+#define VGT_STRMOUT_BASE_OFFSET_HI_0 0x28B44
-+#define VGT_STRMOUT_BASE_OFFSET_HI_1 0x28B48
-+#define VGT_STRMOUT_BASE_OFFSET_HI_2 0x28B4c
-+#define VGT_STRMOUT_BASE_OFFSET_HI_3 0x28B50
-+#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
-+#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
-+#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
-+#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
-+#define VGT_STRMOUT_BUFFER_OFFSET_0 0x28ADC
-+#define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC
-+#define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC
-+#define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C
-+#define VGT_STRMOUT_EN 0x28AB0
-+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
-+#define VTX_REUSE_DEPTH_MASK 0x000000FF
-+#define VGT_EVENT_INITIATOR 0x28a90
-+# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
-+
-+#define VM_CONTEXT0_CNTL 0x1410
-+#define ENABLE_CONTEXT (1 << 0)
-+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-+#define VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490
-+#define VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14B0
-+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574
-+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594
-+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x15B4
-+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1554
-+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
-+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
-+#define RESPONSE_TYPE_MASK 0x000000F0
-+#define RESPONSE_TYPE_SHIFT 4
-+#define VM_L2_CNTL 0x1400
-+#define ENABLE_L2_CACHE (1 << 0)
-+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 13)
-+#define VM_L2_CNTL2 0x1404
-+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-+#define INVALIDATE_L2_CACHE (1 << 1)
-+#define VM_L2_CNTL3 0x1408
-+#define BANK_SELECT_0(x) (((x) & 0x1f) << 0)
-+#define BANK_SELECT_1(x) (((x) & 0x1f) << 5)
-+#define L2_CACHE_UPDATE_MODE(x) (((x) & 3) << 10)
-+#define VM_L2_STATUS 0x140C
-+#define L2_BUSY (1 << 0)
-+
-+#define WAIT_UNTIL 0x8040
-+#define WAIT_2D_IDLE_bit (1 << 14)
-+#define WAIT_3D_IDLE_bit (1 << 15)
-+#define WAIT_2D_IDLECLEAN_bit (1 << 16)
-+#define WAIT_3D_IDLECLEAN_bit (1 << 17)
-+
-+
-+
-+/*
-+ * PM4
-+ */
-+#define PACKET_TYPE0 0
-+#define PACKET_TYPE1 1
-+#define PACKET_TYPE2 2
-+#define PACKET_TYPE3 3
-+
-+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
-+ (((reg) >> 2) & 0xFFFF) | \
-+ ((n) & 0x3FFF) << 16)
-+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
-+ (((op) & 0xFF) << 8) | \
-+ ((n) & 0x3FFF) << 16)
-+
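The macros above fix the PM4 header layout used by the command-stream checker: bits 30-31 hold the packet type, bits 16-29 the dword count and, for type-3 packets, bits 8-15 the opcode. A minimal stand-alone sketch of that packing and unpacking, not part of the patch and using example values only:

/* Illustrative sketch, not part of the patch: packing and unpacking a PM4
 * type-3 header dword with the bit layout defined by the macros above. */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* PACKET3(PACKET3_SET_CONFIG_REG, 2): type 3, opcode 0x68, count 2 */
	uint32_t header = (3u << 30) | ((0x68u & 0xFF) << 8) | ((2u & 0x3FFF) << 16);

	unsigned type   = (header >> 30) & 3;      /* CP_PACKET_GET_TYPE    */
	unsigned count  = (header >> 16) & 0x3FFF; /* CP_PACKET_GET_COUNT   */
	unsigned opcode = (header >> 8) & 0xFF;    /* CP_PACKET3_GET_OPCODE */

	assert(type == 3 && count == 2 && opcode == 0x68);
	return 0;
}
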
-+/* Packet 3 types */
-+#define PACKET3_NOP 0x10
-+#define PACKET3_INDIRECT_BUFFER_END 0x17
-+#define PACKET3_SET_PREDICATION 0x20
-+#define PACKET3_REG_RMW 0x21
-+#define PACKET3_COND_EXEC 0x22
-+#define PACKET3_PRED_EXEC 0x23
-+#define PACKET3_START_3D_CMDBUF 0x24
-+#define PACKET3_DRAW_INDEX_2 0x27
-+#define PACKET3_CONTEXT_CONTROL 0x28
-+#define PACKET3_DRAW_INDEX_IMMD_BE 0x29
-+#define PACKET3_INDEX_TYPE 0x2A
-+#define PACKET3_DRAW_INDEX 0x2B
-+#define PACKET3_DRAW_INDEX_AUTO 0x2D
-+#define PACKET3_DRAW_INDEX_IMMD 0x2E
-+#define PACKET3_NUM_INSTANCES 0x2F
-+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
-+#define PACKET3_INDIRECT_BUFFER_MP 0x38
-+#define PACKET3_MEM_SEMAPHORE 0x39
-+#define PACKET3_MPEG_INDEX 0x3A
-+#define PACKET3_WAIT_REG_MEM 0x3C
-+#define PACKET3_MEM_WRITE 0x3D
-+#define PACKET3_INDIRECT_BUFFER 0x32
-+#define PACKET3_CP_INTERRUPT 0x40
-+#define PACKET3_SURFACE_SYNC 0x43
-+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
-+# define PACKET3_TC_ACTION_ENA (1 << 23)
-+# define PACKET3_VC_ACTION_ENA (1 << 24)
-+# define PACKET3_CB_ACTION_ENA (1 << 25)
-+# define PACKET3_DB_ACTION_ENA (1 << 26)
-+# define PACKET3_SH_ACTION_ENA (1 << 27)
-+# define PACKET3_SMX_ACTION_ENA (1 << 28)
-+#define PACKET3_ME_INITIALIZE 0x44
-+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
-+#define PACKET3_COND_WRITE 0x45
-+#define PACKET3_EVENT_WRITE 0x46
-+#define PACKET3_EVENT_WRITE_EOP 0x47
-+#define PACKET3_ONE_REG_WRITE 0x57
-+#define PACKET3_SET_CONFIG_REG 0x68
-+#define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000
-+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
-+#define PACKET3_SET_CONTEXT_REG 0x69
-+#define PACKET3_SET_CONTEXT_REG_OFFSET 0x00028000
-+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
-+#define PACKET3_SET_ALU_CONST 0x6A
-+#define PACKET3_SET_ALU_CONST_OFFSET 0x00030000
-+#define PACKET3_SET_ALU_CONST_END 0x00032000
-+#define PACKET3_SET_BOOL_CONST 0x6B
-+#define PACKET3_SET_BOOL_CONST_OFFSET 0x0003e380
-+#define PACKET3_SET_BOOL_CONST_END 0x00040000
-+#define PACKET3_SET_LOOP_CONST 0x6C
-+#define PACKET3_SET_LOOP_CONST_OFFSET 0x0003e200
-+#define PACKET3_SET_LOOP_CONST_END 0x0003e380
-+#define PACKET3_SET_RESOURCE 0x6D
-+#define PACKET3_SET_RESOURCE_OFFSET 0x00038000
-+#define PACKET3_SET_RESOURCE_END 0x0003c000
-+#define PACKET3_SET_SAMPLER 0x6E
-+#define PACKET3_SET_SAMPLER_OFFSET 0x0003c000
-+#define PACKET3_SET_SAMPLER_END 0x0003cff0
-+#define PACKET3_SET_CTL_CONST 0x6F
-+#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
-+#define PACKET3_SET_CTL_CONST_END 0x0003e200
-+#define PACKET3_SURFACE_BASE_UPDATE 0x73
-+
-+
-+#define R_008020_GRBM_SOFT_RESET 0x8020
-+#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
-+#define S_008020_SOFT_RESET_CB(x) (((x) & 1) << 1)
-+#define S_008020_SOFT_RESET_CR(x) (((x) & 1) << 2)
-+#define S_008020_SOFT_RESET_DB(x) (((x) & 1) << 3)
-+#define S_008020_SOFT_RESET_PA(x) (((x) & 1) << 5)
-+#define S_008020_SOFT_RESET_SC(x) (((x) & 1) << 6)
-+#define S_008020_SOFT_RESET_SMX(x) (((x) & 1) << 7)
-+#define S_008020_SOFT_RESET_SPI(x) (((x) & 1) << 8)
-+#define S_008020_SOFT_RESET_SH(x) (((x) & 1) << 9)
-+#define S_008020_SOFT_RESET_SX(x) (((x) & 1) << 10)
-+#define S_008020_SOFT_RESET_TC(x) (((x) & 1) << 11)
-+#define S_008020_SOFT_RESET_TA(x) (((x) & 1) << 12)
-+#define S_008020_SOFT_RESET_VC(x) (((x) & 1) << 13)
-+#define S_008020_SOFT_RESET_VGT(x) (((x) & 1) << 14)
-+#define R_008010_GRBM_STATUS 0x8010
-+#define S_008010_CMDFIFO_AVAIL(x) (((x) & 0x1F) << 0)
-+#define S_008010_CP_RQ_PENDING(x) (((x) & 1) << 6)
-+#define S_008010_CF_RQ_PENDING(x) (((x) & 1) << 7)
-+#define S_008010_PF_RQ_PENDING(x) (((x) & 1) << 8)
-+#define S_008010_GRBM_EE_BUSY(x) (((x) & 1) << 10)
-+#define S_008010_VC_BUSY(x) (((x) & 1) << 11)
-+#define S_008010_DB03_CLEAN(x) (((x) & 1) << 12)
-+#define S_008010_CB03_CLEAN(x) (((x) & 1) << 13)
-+#define S_008010_VGT_BUSY_NO_DMA(x) (((x) & 1) << 16)
-+#define S_008010_VGT_BUSY(x) (((x) & 1) << 17)
-+#define S_008010_TA03_BUSY(x) (((x) & 1) << 18)
-+#define S_008010_TC_BUSY(x) (((x) & 1) << 19)
-+#define S_008010_SX_BUSY(x) (((x) & 1) << 20)
-+#define S_008010_SH_BUSY(x) (((x) & 1) << 21)
-+#define S_008010_SPI03_BUSY(x) (((x) & 1) << 22)
-+#define S_008010_SMX_BUSY(x) (((x) & 1) << 23)
-+#define S_008010_SC_BUSY(x) (((x) & 1) << 24)
-+#define S_008010_PA_BUSY(x) (((x) & 1) << 25)
-+#define S_008010_DB03_BUSY(x) (((x) & 1) << 26)
-+#define S_008010_CR_BUSY(x) (((x) & 1) << 27)
-+#define S_008010_CP_COHERENCY_BUSY(x) (((x) & 1) << 28)
-+#define S_008010_CP_BUSY(x) (((x) & 1) << 29)
-+#define S_008010_CB03_BUSY(x) (((x) & 1) << 30)
-+#define S_008010_GUI_ACTIVE(x) (((x) & 1) << 31)
-+#define G_008010_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x1F)
-+#define G_008010_CP_RQ_PENDING(x) (((x) >> 6) & 1)
-+#define G_008010_CF_RQ_PENDING(x) (((x) >> 7) & 1)
-+#define G_008010_PF_RQ_PENDING(x) (((x) >> 8) & 1)
-+#define G_008010_GRBM_EE_BUSY(x) (((x) >> 10) & 1)
-+#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
-+#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
-+#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
-+#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
-+#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
-+#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
-+#define G_008010_TC_BUSY(x) (((x) >> 19) & 1)
-+#define G_008010_SX_BUSY(x) (((x) >> 20) & 1)
-+#define G_008010_SH_BUSY(x) (((x) >> 21) & 1)
-+#define G_008010_SPI03_BUSY(x) (((x) >> 22) & 1)
-+#define G_008010_SMX_BUSY(x) (((x) >> 23) & 1)
-+#define G_008010_SC_BUSY(x) (((x) >> 24) & 1)
-+#define G_008010_PA_BUSY(x) (((x) >> 25) & 1)
-+#define G_008010_DB03_BUSY(x) (((x) >> 26) & 1)
-+#define G_008010_CR_BUSY(x) (((x) >> 27) & 1)
-+#define G_008010_CP_COHERENCY_BUSY(x) (((x) >> 28) & 1)
-+#define G_008010_CP_BUSY(x) (((x) >> 29) & 1)
-+#define G_008010_CB03_BUSY(x) (((x) >> 30) & 1)
-+#define G_008010_GUI_ACTIVE(x) (((x) >> 31) & 1)
-+#define R_008014_GRBM_STATUS2 0x8014
-+#define S_008014_CR_CLEAN(x) (((x) & 1) << 0)
-+#define S_008014_SMX_CLEAN(x) (((x) & 1) << 1)
-+#define S_008014_SPI0_BUSY(x) (((x) & 1) << 8)
-+#define S_008014_SPI1_BUSY(x) (((x) & 1) << 9)
-+#define S_008014_SPI2_BUSY(x) (((x) & 1) << 10)
-+#define S_008014_SPI3_BUSY(x) (((x) & 1) << 11)
-+#define S_008014_TA0_BUSY(x) (((x) & 1) << 12)
-+#define S_008014_TA1_BUSY(x) (((x) & 1) << 13)
-+#define S_008014_TA2_BUSY(x) (((x) & 1) << 14)
-+#define S_008014_TA3_BUSY(x) (((x) & 1) << 15)
-+#define S_008014_DB0_BUSY(x) (((x) & 1) << 16)
-+#define S_008014_DB1_BUSY(x) (((x) & 1) << 17)
-+#define S_008014_DB2_BUSY(x) (((x) & 1) << 18)
-+#define S_008014_DB3_BUSY(x) (((x) & 1) << 19)
-+#define S_008014_CB0_BUSY(x) (((x) & 1) << 20)
-+#define S_008014_CB1_BUSY(x) (((x) & 1) << 21)
-+#define S_008014_CB2_BUSY(x) (((x) & 1) << 22)
-+#define S_008014_CB3_BUSY(x) (((x) & 1) << 23)
-+#define G_008014_CR_CLEAN(x) (((x) >> 0) & 1)
-+#define G_008014_SMX_CLEAN(x) (((x) >> 1) & 1)
-+#define G_008014_SPI0_BUSY(x) (((x) >> 8) & 1)
-+#define G_008014_SPI1_BUSY(x) (((x) >> 9) & 1)
-+#define G_008014_SPI2_BUSY(x) (((x) >> 10) & 1)
-+#define G_008014_SPI3_BUSY(x) (((x) >> 11) & 1)
-+#define G_008014_TA0_BUSY(x) (((x) >> 12) & 1)
-+#define G_008014_TA1_BUSY(x) (((x) >> 13) & 1)
-+#define G_008014_TA2_BUSY(x) (((x) >> 14) & 1)
-+#define G_008014_TA3_BUSY(x) (((x) >> 15) & 1)
-+#define G_008014_DB0_BUSY(x) (((x) >> 16) & 1)
-+#define G_008014_DB1_BUSY(x) (((x) >> 17) & 1)
-+#define G_008014_DB2_BUSY(x) (((x) >> 18) & 1)
-+#define G_008014_DB3_BUSY(x) (((x) >> 19) & 1)
-+#define G_008014_CB0_BUSY(x) (((x) >> 20) & 1)
-+#define G_008014_CB1_BUSY(x) (((x) >> 21) & 1)
-+#define G_008014_CB2_BUSY(x) (((x) >> 22) & 1)
-+#define G_008014_CB3_BUSY(x) (((x) >> 23) & 1)
-+#define R_000E50_SRBM_STATUS 0x0E50
-+#define G_000E50_RLC_RQ_PENDING(x) (((x) >> 3) & 1)
-+#define G_000E50_RCU_RQ_PENDING(x) (((x) >> 4) & 1)
-+#define G_000E50_GRBM_RQ_PENDING(x) (((x) >> 5) & 1)
-+#define G_000E50_HI_RQ_PENDING(x) (((x) >> 6) & 1)
-+#define G_000E50_IO_EXTERN_SIGNAL(x) (((x) >> 7) & 1)
-+#define G_000E50_VMC_BUSY(x) (((x) >> 8) & 1)
-+#define G_000E50_MCB_BUSY(x) (((x) >> 9) & 1)
-+#define G_000E50_MCDZ_BUSY(x) (((x) >> 10) & 1)
-+#define G_000E50_MCDY_BUSY(x) (((x) >> 11) & 1)
-+#define G_000E50_MCDX_BUSY(x) (((x) >> 12) & 1)
-+#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
-+#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
-+#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
-+#define R_000E60_SRBM_SOFT_RESET 0x0E60
-+#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
-+#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2)
-+#define S_000E60_SOFT_RESET_CMC(x) (((x) & 1) << 3)
-+#define S_000E60_SOFT_RESET_CSC(x) (((x) & 1) << 4)
-+#define S_000E60_SOFT_RESET_DC(x) (((x) & 1) << 5)
-+#define S_000E60_SOFT_RESET_GRBM(x) (((x) & 1) << 8)
-+#define S_000E60_SOFT_RESET_HDP(x) (((x) & 1) << 9)
-+#define S_000E60_SOFT_RESET_IH(x) (((x) & 1) << 10)
-+#define S_000E60_SOFT_RESET_MC(x) (((x) & 1) << 11)
-+#define S_000E60_SOFT_RESET_RLC(x) (((x) & 1) << 13)
-+#define S_000E60_SOFT_RESET_ROM(x) (((x) & 1) << 14)
-+#define S_000E60_SOFT_RESET_SEM(x) (((x) & 1) << 15)
-+#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
-+#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
-+
-+#endif
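Throughout this header, register fields come with paired S_xxxxxx_FIELD (set) and G_xxxxxx_FIELD (get) helpers that place or extract a value at a fixed bit position. A minimal stand-alone sketch of the pattern, not part of the patch, shown for the 1-bit GUI_ACTIVE field of GRBM_STATUS:

/* Illustrative sketch, not part of the patch: the S_/G_ field-accessor
 * pattern used above, shown for a 1-bit field at bit 31. */
#include <stdint.h>
#include <assert.h>

#define S_GUI_ACTIVE(x) (((x) & 1) << 31)   /* mirrors S_008010_GUI_ACTIVE */
#define G_GUI_ACTIVE(x) (((x) >> 31) & 1)   /* mirrors G_008010_GUI_ACTIVE */

int main(void)
{
	uint32_t status = S_GUI_ACTIVE(1u) | 0x5u; /* example register value */

	assert(G_GUI_ACTIVE(status) == 1);
	return 0;
}
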
-diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
-index b519fb2..3299733 100644
---- a/drivers/gpu/drm/radeon/radeon.h
-+++ b/drivers/gpu/drm/radeon/radeon.h
-@@ -50,8 +50,8 @@
- #include <linux/kref.h>
-
- #include "radeon_mode.h"
-+#include "radeon_share.h"
- #include "radeon_reg.h"
--#include "r300.h"
-
- /*
- * Modules parameters.
-@@ -66,6 +66,7 @@ extern int radeon_gart_size;
- extern int radeon_benchmarking;
- extern int radeon_testing;
- extern int radeon_connector_table;
-+extern int radeon_tv;
-
- /*
- * Copy from radeon_drv.h so we don't have to include both and have conflicting
-@@ -111,10 +112,11 @@ enum radeon_family {
- CHIP_RV635,
- CHIP_RV670,
- CHIP_RS780,
-+ CHIP_RS880,
- CHIP_RV770,
- CHIP_RV730,
- CHIP_RV710,
-- CHIP_RS880,
-+ CHIP_RV740,
- CHIP_LAST,
- };
-
-@@ -151,10 +153,21 @@ struct radeon_device;
- */
- bool radeon_get_bios(struct radeon_device *rdev);
-
-+
- /*
-- * Clocks
-+ * Dummy page
- */
-+struct radeon_dummy_page {
-+ struct page *page;
-+ dma_addr_t addr;
-+};
-+int radeon_dummy_page_init(struct radeon_device *rdev);
-+void radeon_dummy_page_fini(struct radeon_device *rdev);
-+
-
-+/*
-+ * Clocks
-+ */
- struct radeon_clock {
- struct radeon_pll p1pll;
- struct radeon_pll p2pll;
-@@ -165,6 +178,7 @@ struct radeon_clock {
- uint32_t default_sclk;
- };
-
-+
- /*
- * Fences.
- */
-@@ -331,14 +345,18 @@ struct radeon_mc {
- resource_size_t aper_size;
- resource_size_t aper_base;
- resource_size_t agp_base;
-- unsigned gtt_location;
-- unsigned gtt_size;
-- unsigned vram_location;
- /* for some chips with <= 32MB we need to lie
- * about vram size near mc fb location */
-- unsigned mc_vram_size;
-+ u64 mc_vram_size;
-+ u64 gtt_location;
-+ u64 gtt_size;
-+ u64 gtt_start;
-+ u64 gtt_end;
-+ u64 vram_location;
-+ u64 vram_start;
-+ u64 vram_end;
- unsigned vram_width;
-- unsigned real_vram_size;
-+ u64 real_vram_size;
- int vram_mtrr;
- bool vram_is_ddr;
- };
-@@ -410,6 +428,16 @@ struct radeon_cp {
- bool ready;
- };
-
-+struct r600_blit {
-+ struct radeon_object *shader_obj;
-+ u64 shader_gpu_addr;
-+ u32 vs_offset, ps_offset;
-+ u32 state_offset;
-+ u32 state_len;
-+ u32 vb_used, vb_total;
-+ struct radeon_ib *vb_ib;
-+};
-+
- int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
- void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
- int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
-@@ -462,6 +490,7 @@ struct radeon_cs_parser {
- int chunk_relocs_idx;
- struct radeon_ib *ib;
- void *track;
-+ unsigned family;
- };
-
- struct radeon_cs_packet {
-@@ -558,6 +587,9 @@ int r100_debugfs_cp_init(struct radeon_device *rdev);
- */
- struct radeon_asic {
- int (*init)(struct radeon_device *rdev);
-+ void (*fini)(struct radeon_device *rdev);
-+ int (*resume)(struct radeon_device *rdev);
-+ int (*suspend)(struct radeon_device *rdev);
- void (*errata)(struct radeon_device *rdev);
- void (*vram_info)(struct radeon_device *rdev);
- int (*gpu_reset)(struct radeon_device *rdev);
-@@ -572,7 +604,11 @@ struct radeon_asic {
- int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
- void (*cp_fini)(struct radeon_device *rdev);
- void (*cp_disable)(struct radeon_device *rdev);
-+ void (*cp_commit)(struct radeon_device *rdev);
- void (*ring_start)(struct radeon_device *rdev);
-+ int (*ring_test)(struct radeon_device *rdev);
-+ void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
-+ int (*ib_test)(struct radeon_device *rdev);
- int (*irq_set)(struct radeon_device *rdev);
- int (*irq_process)(struct radeon_device *rdev);
- u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
-@@ -604,8 +640,16 @@ struct radeon_asic {
- void (*bandwidth_update)(struct radeon_device *rdev);
- };
-
-+struct r100_asic {
-+ const unsigned *reg_safe_bm;
-+ unsigned reg_safe_bm_size;
-+};
-+
- union radeon_asic_config {
- struct r300_asic r300;
-+ struct r100_asic r100;
-+ struct r600_asic r600;
-+ struct rv770_asic rv770;
- };
-
-
-@@ -691,11 +735,16 @@ struct radeon_device {
- struct radeon_pm pm;
- struct mutex cs_mutex;
- struct radeon_wb wb;
-+ struct radeon_dummy_page dummy_page;
- bool gpu_lockup;
- bool shutdown;
- bool suspend;
- bool need_dma32;
-+ bool new_init_path;
- struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
-+ const struct firmware *me_fw; /* all family ME firmware */
-+ const struct firmware *pfp_fw; /* r6/700 PFP firmware */
-+ struct r600_blit r600_blit;
- };
-
- int radeon_device_init(struct radeon_device *rdev,
-@@ -705,6 +754,13 @@ int radeon_device_init(struct radeon_device *rdev,
- void radeon_device_fini(struct radeon_device *rdev);
- int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-
-+/* r600 blit */
-+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-+void r600_kms_blit_copy(struct radeon_device *rdev,
-+ u64 src_gpu_addr, u64 dst_gpu_addr,
-+ int size_bytes);
-+
- static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
- {
- if (reg < 0x10000)
-@@ -732,6 +788,7 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
- #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
- #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
- #define RREG32(reg) r100_mm_rreg(rdev, (reg))
-+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
- #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
- #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
- #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
-@@ -755,6 +812,7 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
- tmp_ |= ((val) & ~(mask)); \
- WREG32_PLL(reg, tmp_); \
- } while (0)
-+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
-
- /*
- * Indirect registers accessor
-@@ -819,51 +877,6 @@ void radeon_atombios_fini(struct radeon_device *rdev);
- /*
- * RING helpers.
- */
--#define CP_PACKET0 0x00000000
--#define PACKET0_BASE_INDEX_SHIFT 0
--#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
--#define PACKET0_COUNT_SHIFT 16
--#define PACKET0_COUNT_MASK (0x3fff << 16)
--#define CP_PACKET1 0x40000000
--#define CP_PACKET2 0x80000000
--#define PACKET2_PAD_SHIFT 0
--#define PACKET2_PAD_MASK (0x3fffffff << 0)
--#define CP_PACKET3 0xC0000000
--#define PACKET3_IT_OPCODE_SHIFT 8
--#define PACKET3_IT_OPCODE_MASK (0xff << 8)
--#define PACKET3_COUNT_SHIFT 16
--#define PACKET3_COUNT_MASK (0x3fff << 16)
--/* PACKET3 op code */
--#define PACKET3_NOP 0x10
--#define PACKET3_3D_DRAW_VBUF 0x28
--#define PACKET3_3D_DRAW_IMMD 0x29
--#define PACKET3_3D_DRAW_INDX 0x2A
--#define PACKET3_3D_LOAD_VBPNTR 0x2F
--#define PACKET3_INDX_BUFFER 0x33
--#define PACKET3_3D_DRAW_VBUF_2 0x34
--#define PACKET3_3D_DRAW_IMMD_2 0x35
--#define PACKET3_3D_DRAW_INDX_2 0x36
--#define PACKET3_BITBLT_MULTI 0x9B
--
--#define PACKET0(reg, n) (CP_PACKET0 | \
-- REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
-- REG_SET(PACKET0_COUNT, (n)))
--#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
--#define PACKET3(op, n) (CP_PACKET3 | \
-- REG_SET(PACKET3_IT_OPCODE, (op)) | \
-- REG_SET(PACKET3_COUNT, (n)))
--
--#define PACKET_TYPE0 0
--#define PACKET_TYPE1 1
--#define PACKET_TYPE2 2
--#define PACKET_TYPE3 3
--
--#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
--#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
--#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
--#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
--#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
--
- static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
- {
- #if DRM_DEBUG_CODE
-@@ -882,6 +895,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
- * ASICs macro.
- */
- #define radeon_init(rdev) (rdev)->asic->init((rdev))
-+#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
-+#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
-+#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
- #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
- #define radeon_errata(rdev) (rdev)->asic->errata((rdev))
- #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
-@@ -897,7 +913,11 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
- #define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
- #define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
- #define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
-+#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
- #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-+#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
-+#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
-+#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev))
- #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
- #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
- #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
-diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
-index 93d8f88..e87bb91 100644
---- a/drivers/gpu/drm/radeon/radeon_asic.h
-+++ b/drivers/gpu/drm/radeon/radeon_asic.h
-@@ -42,6 +42,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
- */
- int r100_init(struct radeon_device *rdev);
-+int r200_init(struct radeon_device *rdev);
- uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
- void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
- void r100_errata(struct radeon_device *rdev);
-@@ -59,6 +60,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
- int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
- void r100_cp_fini(struct radeon_device *rdev);
- void r100_cp_disable(struct radeon_device *rdev);
-+void r100_cp_commit(struct radeon_device *rdev);
- void r100_ring_start(struct radeon_device *rdev);
- int r100_irq_set(struct radeon_device *rdev);
- int r100_irq_process(struct radeon_device *rdev);
-@@ -77,6 +79,9 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
- uint32_t offset, uint32_t obj_size);
- int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
- void r100_bandwidth_update(struct radeon_device *rdev);
-+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-+int r100_ib_test(struct radeon_device *rdev);
-+int r100_ring_test(struct radeon_device *rdev);
-
- static struct radeon_asic r100_asic = {
- .init = &r100_init,
-@@ -94,7 +99,11 @@ static struct radeon_asic r100_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &r100_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
-@@ -155,7 +164,11 @@ static struct radeon_asic r300_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
-@@ -196,7 +209,11 @@ static struct radeon_asic r420_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
-@@ -244,7 +261,11 @@ static struct radeon_asic rs400_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
-@@ -266,7 +287,7 @@ static struct radeon_asic rs400_asic = {
- /*
- * rs600.
- */
--int rs600_init(struct radeon_device *dev);
-+int rs600_init(struct radeon_device *rdev);
- void rs600_errata(struct radeon_device *rdev);
- void rs600_vram_info(struct radeon_device *rdev);
- int rs600_mc_init(struct radeon_device *rdev);
-@@ -297,7 +318,11 @@ static struct radeon_asic rs600_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
-@@ -340,7 +365,11 @@ static struct radeon_asic rs690_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
-@@ -390,7 +419,11 @@ static struct radeon_asic rv515_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &rv515_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
-@@ -433,7 +466,11 @@ static struct radeon_asic r520_asic = {
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
-+ .cp_commit = &r100_cp_commit,
- .ring_start = &rv515_ring_start,
-+ .ring_test = &r100_ring_test,
-+ .ring_ib_execute = &r100_ring_ib_execute,
-+ .ib_test = &r100_ib_test,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
-@@ -452,9 +489,127 @@ static struct radeon_asic r520_asic = {
- };
-
- /*
-- * r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710
-+ * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
- */
-+int r600_init(struct radeon_device *rdev);
-+void r600_fini(struct radeon_device *rdev);
-+int r600_suspend(struct radeon_device *rdev);
-+int r600_resume(struct radeon_device *rdev);
-+int r600_wb_init(struct radeon_device *rdev);
-+void r600_wb_fini(struct radeon_device *rdev);
-+void r600_cp_commit(struct radeon_device *rdev);
-+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
- uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
- void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-+int r600_cs_parse(struct radeon_cs_parser *p);
-+void r600_fence_ring_emit(struct radeon_device *rdev,
-+ struct radeon_fence *fence);
-+int r600_copy_dma(struct radeon_device *rdev,
-+ uint64_t src_offset,
-+ uint64_t dst_offset,
-+ unsigned num_pages,
-+ struct radeon_fence *fence);
-+int r600_irq_process(struct radeon_device *rdev);
-+int r600_irq_set(struct radeon_device *rdev);
-+int r600_gpu_reset(struct radeon_device *rdev);
-+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
-+ uint32_t tiling_flags, uint32_t pitch,
-+ uint32_t offset, uint32_t obj_size);
-+int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-+int r600_ib_test(struct radeon_device *rdev);
-+int r600_ring_test(struct radeon_device *rdev);
-+int r600_copy_blit(struct radeon_device *rdev,
-+ uint64_t src_offset, uint64_t dst_offset,
-+ unsigned num_pages, struct radeon_fence *fence);
-+
-+static struct radeon_asic r600_asic = {
-+ .errata = NULL,
-+ .init = &r600_init,
-+ .fini = &r600_fini,
-+ .suspend = &r600_suspend,
-+ .resume = &r600_resume,
-+ .cp_commit = &r600_cp_commit,
-+ .vram_info = NULL,
-+ .gpu_reset = &r600_gpu_reset,
-+ .mc_init = NULL,
-+ .mc_fini = NULL,
-+ .wb_init = &r600_wb_init,
-+ .wb_fini = &r600_wb_fini,
-+ .gart_enable = NULL,
-+ .gart_disable = NULL,
-+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
-+ .gart_set_page = &rs600_gart_set_page,
-+ .cp_init = NULL,
-+ .cp_fini = NULL,
-+ .cp_disable = NULL,
-+ .ring_start = NULL,
-+ .ring_test = &r600_ring_test,
-+ .ring_ib_execute = &r600_ring_ib_execute,
-+ .ib_test = &r600_ib_test,
-+ .irq_set = &r600_irq_set,
-+ .irq_process = &r600_irq_process,
-+ .fence_ring_emit = &r600_fence_ring_emit,
-+ .cs_parse = &r600_cs_parse,
-+ .copy_blit = &r600_copy_blit,
-+ .copy_dma = &r600_copy_blit,
-+ .copy = NULL,
-+ .set_engine_clock = &radeon_atom_set_engine_clock,
-+ .set_memory_clock = &radeon_atom_set_memory_clock,
-+ .set_pcie_lanes = NULL,
-+ .set_clock_gating = &radeon_atom_set_clock_gating,
-+ .set_surface_reg = r600_set_surface_reg,
-+ .clear_surface_reg = r600_clear_surface_reg,
-+ .bandwidth_update = &r520_bandwidth_update,
-+};
-+
-+/*
-+ * rv770,rv730,rv710,rv740
-+ */
-+int rv770_init(struct radeon_device *rdev);
-+void rv770_fini(struct radeon_device *rdev);
-+int rv770_suspend(struct radeon_device *rdev);
-+int rv770_resume(struct radeon_device *rdev);
-+int rv770_gpu_reset(struct radeon_device *rdev);
-+
-+static struct radeon_asic rv770_asic = {
-+ .errata = NULL,
-+ .init = &rv770_init,
-+ .fini = &rv770_fini,
-+ .suspend = &rv770_suspend,
-+ .resume = &rv770_resume,
-+ .cp_commit = &r600_cp_commit,
-+ .vram_info = NULL,
-+ .gpu_reset = &rv770_gpu_reset,
-+ .mc_init = NULL,
-+ .mc_fini = NULL,
-+ .wb_init = &r600_wb_init,
-+ .wb_fini = &r600_wb_fini,
-+ .gart_enable = NULL,
-+ .gart_disable = NULL,
-+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
-+ .gart_set_page = &rs600_gart_set_page,
-+ .cp_init = NULL,
-+ .cp_fini = NULL,
-+ .cp_disable = NULL,
-+ .ring_start = NULL,
-+ .ring_test = &r600_ring_test,
-+ .ring_ib_execute = &r600_ring_ib_execute,
-+ .ib_test = &r600_ib_test,
-+ .irq_set = &r600_irq_set,
-+ .irq_process = &r600_irq_process,
-+ .fence_ring_emit = &r600_fence_ring_emit,
-+ .cs_parse = &r600_cs_parse,
-+ .copy_blit = &r600_copy_blit,
-+ .copy_dma = &r600_copy_blit,
-+ .copy = NULL,
-+ .set_engine_clock = &radeon_atom_set_engine_clock,
-+ .set_memory_clock = &radeon_atom_set_memory_clock,
-+ .set_pcie_lanes = NULL,
-+ .set_clock_gating = &radeon_atom_set_clock_gating,
-+ .set_surface_reg = r600_set_surface_reg,
-+ .clear_surface_reg = r600_clear_surface_reg,
-+ .bandwidth_update = &r520_bandwidth_update,
-+};
-
- #endif
-diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
-index fcfe5c0..a8fb392 100644
---- a/drivers/gpu/drm/radeon/radeon_atombios.c
-+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
-@@ -370,10 +370,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
- && record->
- ucRecordType <=
- ATOM_MAX_OBJECT_RECORD_NUMBER) {
-- DRM_ERROR
-- ("record type %d\n",
-- record->
-- ucRecordType);
- switch (record->
- ucRecordType) {
- case ATOM_I2C_RECORD_TYPE:
-@@ -471,11 +467,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
- continue;
- }
-
-- if (i == ATOM_DEVICE_TV1_INDEX) {
-- DRM_DEBUG("Skipping TV Out\n");
-- continue;
-- }
--
- bios_connectors[i].connector_type =
- supported_devices_connector_convert[ci.sucConnectorInfo.
- sbfAccess.
-@@ -858,6 +849,72 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
- return p_dac;
- }
-
-+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
-+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing,
-+ int32_t *pixel_clock)
-+{
-+ struct radeon_mode_info *mode_info = &rdev->mode_info;
-+ ATOM_ANALOG_TV_INFO *tv_info;
-+ ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2;
-+ ATOM_DTD_FORMAT *dtd_timings;
-+ int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
-+ u8 frev, crev;
-+ uint16_t data_offset;
-+
-+ atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
-+
-+ switch (crev) {
-+ case 1:
-+ tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
-+ if (index > MAX_SUPPORTED_TV_TIMING)
-+ return false;
-+
-+ crtc_timing->usH_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
-+ crtc_timing->usH_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
-+ crtc_timing->usH_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
-+ crtc_timing->usH_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
-+
-+ crtc_timing->usV_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
-+ crtc_timing->usV_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
-+ crtc_timing->usV_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
-+ crtc_timing->usV_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
-+
-+ crtc_timing->susModeMiscInfo = tv_info->aModeTimings[index].susModeMiscInfo;
-+
-+ crtc_timing->ucOverscanRight = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanRight);
-+ crtc_timing->ucOverscanLeft = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanLeft);
-+ crtc_timing->ucOverscanBottom = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanBottom);
-+ crtc_timing->ucOverscanTop = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanTop);
-+ *pixel_clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
-+
-+ if (index == 1) {
-+ /* PAL timings appear to have wrong values for totals */
-+ crtc_timing->usH_Total -= 1;
-+ crtc_timing->usV_Total -= 1;
-+ }
-+ break;
-+ case 2:
-+ tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
-+ if (index > MAX_SUPPORTED_TV_TIMING_V1_2)
-+ return false;
-+
-+ dtd_timings = &tv_info_v1_2->aModeTimings[index];
-+ crtc_timing->usH_Total = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time);
-+ crtc_timing->usH_Disp = le16_to_cpu(dtd_timings->usHActive);
-+ crtc_timing->usH_SyncStart = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset);
-+ crtc_timing->usH_SyncWidth = le16_to_cpu(dtd_timings->usHSyncWidth);
-+ crtc_timing->usV_Total = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time);
-+ crtc_timing->usV_Disp = le16_to_cpu(dtd_timings->usVActive);
-+ crtc_timing->usV_SyncStart = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset);
-+ crtc_timing->usV_SyncWidth = le16_to_cpu(dtd_timings->usVSyncWidth);
-+
-+ crtc_timing->susModeMiscInfo.usAccess = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
-+ *pixel_clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
-+ break;
-+ }
-+ return true;
-+}
-+
- struct radeon_encoder_tv_dac *
- radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
- {
-@@ -948,10 +1005,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
- uint32_t bios_2_scratch, bios_6_scratch;
-
- if (rdev->family >= CHIP_R600) {
-- bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
-+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
- bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
- } else {
-- bios_2_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
-+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
- bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
- }
-
-diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
-index a37cbce..152eef1 100644
---- a/drivers/gpu/drm/radeon/radeon_clocks.c
-+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
-@@ -102,10 +102,12 @@ void radeon_get_clock_info(struct drm_device *dev)
- p1pll->reference_div = 12;
- if (p2pll->reference_div < 2)
- p2pll->reference_div = 12;
-- if (spll->reference_div < 2)
-- spll->reference_div =
-- RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
-- RADEON_M_SPLL_REF_DIV_MASK;
-+ if (rdev->family < CHIP_RS600) {
-+ if (spll->reference_div < 2)
-+ spll->reference_div =
-+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
-+ RADEON_M_SPLL_REF_DIV_MASK;
-+ }
- if (mpll->reference_div < 2)
- mpll->reference_div = spll->reference_div;
- } else {
-diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index 70ede6a..6a2b029 100644
---- a/drivers/gpu/drm/radeon/radeon_connectors.c
-+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
-@@ -94,6 +94,54 @@ struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
- return NULL;
- }
-
-+
-+/*
-+ * radeon_connector_analog_encoder_conflict_solve
-+ * - search for other connectors sharing this encoder
-+ * if priority is true, then set them disconnected if this is connected
-+ * if priority is false, set us disconnected if they are connected
-+ */
-+static enum drm_connector_status
-+radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
-+ struct drm_encoder *encoder,
-+ enum drm_connector_status current_status,
-+ bool priority)
-+{
-+ struct drm_device *dev = connector->dev;
-+ struct drm_connector *conflict;
-+ int i;
-+
-+ list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
-+ if (conflict == connector)
-+ continue;
-+
-+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-+ if (conflict->encoder_ids[i] == 0)
-+ break;
-+
-+ /* if the IDs match */
-+ if (conflict->encoder_ids[i] == encoder->base.id) {
-+ if (conflict->status != connector_status_connected)
-+ continue;
-+
-+ if (priority == true) {
-+ DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-+ DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
-+ conflict->status = connector_status_disconnected;
-+ radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
-+ } else {
-+ DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-+ DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
-+ current_status = connector_status_disconnected;
-+ }
-+ break;
-+ }
-+ }
-+ }
-+ return current_status;
-+
-+}
-+
- static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
- {
- struct drm_device *dev = encoder->dev;
-@@ -213,7 +261,6 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
- static int radeon_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
- {
--
- return MODE_OK;
- }
-
-@@ -225,22 +272,22 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
- bool dret;
- enum drm_connector_status ret = connector_status_disconnected;
-
-+ encoder = radeon_best_single_encoder(connector);
-+ if (!encoder)
-+ ret = connector_status_disconnected;
-+
- radeon_i2c_do_lock(radeon_connector, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
- if (dret)
- ret = connector_status_connected;
- else {
-- /* if EDID fails to a load detect */
-- encoder = radeon_best_single_encoder(connector);
-- if (!encoder)
-- ret = connector_status_disconnected;
-- else {
-- encoder_funcs = encoder->helper_private;
-- ret = encoder_funcs->detect(encoder, connector);
-- }
-+ encoder_funcs = encoder->helper_private;
-+ ret = encoder_funcs->detect(encoder, connector);
- }
-
-+ if (ret == connector_status_connected)
-+ ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
- radeon_connector_update_scratch_regs(connector, ret);
- return ret;
- }
-@@ -259,21 +306,87 @@ struct drm_connector_funcs radeon_vga_connector_funcs = {
- .set_property = radeon_connector_set_property,
- };
-
-+static struct drm_display_mode tv_fixed_mode = {
-+ DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 38250, 800, 832,
-+ 912, 1024, 0, 600, 603, 607, 624, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC),
-+};
-+
-+static int radeon_tv_get_modes(struct drm_connector *connector)
-+{
-+ struct drm_device *dev = connector->dev;
-+ struct drm_display_mode *tv_mode;
-+
-+ tv_mode = drm_mode_duplicate(dev, &tv_fixed_mode);
-+ tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-+
-+ drm_mode_probed_add(connector, tv_mode);
-+
-+ return 1;
-+}
-+
-+static int radeon_tv_mode_valid(struct drm_connector *connector,
-+ struct drm_display_mode *mode)
-+{
-+ return MODE_OK;
-+}
-+
-+static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
-+{
-+ struct drm_encoder *encoder;
-+ struct drm_encoder_helper_funcs *encoder_funcs;
-+ int ret;
-+
-+ encoder = radeon_best_single_encoder(connector);
-+ if (!encoder)
-+ ret = connector_status_disconnected;
-+ else {
-+ encoder_funcs = encoder->helper_private;
-+ ret = encoder_funcs->detect(encoder, connector);
-+ }
-+ if (ret == connector_status_connected)
-+ ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
-+ radeon_connector_update_scratch_regs(connector, ret);
-+ return ret;
-+}
-+
-+struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
-+ .get_modes = radeon_tv_get_modes,
-+ .mode_valid = radeon_tv_mode_valid,
-+ .best_encoder = radeon_best_single_encoder,
-+};
-+
-+struct drm_connector_funcs radeon_tv_connector_funcs = {
-+ .dpms = drm_helper_connector_dpms,
-+ .detect = radeon_tv_detect,
-+ .fill_modes = drm_helper_probe_single_connector_modes,
-+ .destroy = radeon_connector_destroy,
-+ .set_property = radeon_connector_set_property,
-+};
-+
- static int radeon_dvi_get_modes(struct drm_connector *connector)
- {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_ddc_get_modes(radeon_connector);
-- /* reset scratch regs here since radeon_dvi_detect doesn't check digital bit */
-- radeon_connector_update_scratch_regs(connector, connector_status_connected);
- return ret;
- }
-
-+/*
-+ * DVI is complicated
-+ * Do a DDC probe, if DDC probe passes, get the full EDID so
-+ * we can do analog/digital monitor detection at this point.
-+ * If the monitor is an analog monitor or we got no DDC,
-+ * we need to find the DAC encoder object for this connector.
-+ * If we got no DDC, we do load detection on the DAC encoder object.
-+ * If we got analog DDC or load detection passes on the DAC encoder
-+ * we have to check if this analog encoder is shared with anyone else (TV)
-+ * if its shared we have to set the other connector to disconnected.
-+ */
- static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
- {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-- struct drm_encoder *encoder;
-+ struct drm_encoder *encoder = NULL;
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_mode_object *obj;
- int i;
-@@ -283,32 +396,58 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
- radeon_i2c_do_lock(radeon_connector, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
-- if (dret)
-- ret = connector_status_connected;
-- else {
-- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-- if (connector->encoder_ids[i] == 0)
-- break;
-+ if (dret) {
-+ radeon_i2c_do_lock(radeon_connector, 1);
-+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-+ radeon_i2c_do_lock(radeon_connector, 0);
-+
-+ if (!radeon_connector->edid) {
-+ DRM_ERROR("DDC responded but not EDID found for %s\n",
-+ drm_get_connector_name(connector));
-+ } else {
-+ radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
-+
-+ /* if this isn't a digital monitor
-+ then we need to make sure we don't have any
-+ TV conflicts */
-+ ret = connector_status_connected;
-+ }
-+ }
-+
-+ if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
-+ goto out;
-+
-+ /* find analog encoder */
-+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-+ if (connector->encoder_ids[i] == 0)
-+ break;
-
-- obj = drm_mode_object_find(connector->dev,
-- connector->encoder_ids[i],
-- DRM_MODE_OBJECT_ENCODER);
-- if (!obj)
-- continue;
-+ obj = drm_mode_object_find(connector->dev,
-+ connector->encoder_ids[i],
-+ DRM_MODE_OBJECT_ENCODER);
-+ if (!obj)
-+ continue;
-
-- encoder = obj_to_encoder(obj);
-+ encoder = obj_to_encoder(obj);
-
-- encoder_funcs = encoder->helper_private;
-- if (encoder_funcs->detect) {
-+ encoder_funcs = encoder->helper_private;
-+ if (encoder_funcs->detect) {
-+ if (ret != connector_status_connected) {
- ret = encoder_funcs->detect(encoder, connector);
- if (ret == connector_status_connected) {
-- radeon_connector->use_digital = 0;
-- break;
-+ radeon_connector->use_digital = false;
- }
- }
-+ break;
- }
- }
-
-+ if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) &&
-+ encoder) {
-+ ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
-+ }
-+
-+out:
- /* updated in get modes as well since we need to know if it's analog or digital */
- radeon_connector_update_scratch_regs(connector, ret);
- return ret;
-@@ -332,7 +471,7 @@ struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
-
- encoder = obj_to_encoder(obj);
-
-- if (radeon_connector->use_digital) {
-+ if (radeon_connector->use_digital == true) {
- if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
- return encoder;
- } else {
-@@ -385,10 +524,7 @@ radeon_add_atom_connector(struct drm_device *dev,
- uint32_t subpixel_order = SubPixelNone;
-
- /* fixme - tv/cv/din */
-- if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
-- (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
-- (connector_type == DRM_MODE_CONNECTOR_Composite) ||
-- (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
-+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
- return;
-
- /* see if we already added it */
-@@ -480,6 +616,10 @@ radeon_add_atom_connector(struct drm_device *dev,
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_9PinDIN:
-+ if (radeon_tv == 1) {
-+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-+ }
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
-@@ -522,10 +662,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
- uint32_t subpixel_order = SubPixelNone;
-
- /* fixme - tv/cv/din */
-- if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
-- (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
-- (connector_type == DRM_MODE_CONNECTOR_Composite) ||
-- (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
-+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
- return;
-
- /* see if we already added it */
-@@ -578,6 +715,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_9PinDIN:
-+ if (radeon_tv == 1) {
-+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-+ }
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
-index 7a52c46..fa063d0 100644
---- a/drivers/gpu/drm/radeon/radeon_cp.c
-+++ b/drivers/gpu/drm/radeon/radeon_cp.c
-@@ -36,10 +36,19 @@
- #include "radeon_drv.h"
- #include "r300_reg.h"
-
--#include "radeon_microcode.h"
--
- #define RADEON_FIFO_DEBUG 0
-
-+/* Firmware Names */
-+#define FIRMWARE_R100 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R200 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R300 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R420 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_RS690 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_RS600 "/*(DEBLOBBED)*/"
-+#define FIRMWARE_R520 "/*(DEBLOBBED)*/"
-+
-+/*(DEBLOBBED)*/
-+
- static int radeon_do_cleanup_cp(struct drm_device * dev);
- static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
-
-@@ -460,13 +475,99 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
- */
-
- /* Load the microcode for the CP */
--#define radeon_cp_load_microcode(dev_priv) \
-- do { \
-- DRM_ERROR("Missing Free microcode!\n"); \
-- radeon_do_cleanup_cp(dev); \
-- return -EINVAL; \
-- } while (0)
--/*(DEBLOBBED)*/
-+static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv)
-+{
-+ struct platform_device *pdev;
-+ const char *fw_name = NULL;
-+ int err;
-+
-+ DRM_DEBUG("\n");
-+
-+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
-+ err = IS_ERR(pdev);
-+ if (err) {
-+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
-+ return -EINVAL;
-+ }
-+
-+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
-+ DRM_INFO("Loading R100 Microcode\n");
-+ fw_name = FIRMWARE_R100;
-+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
-+ DRM_INFO("Loading R200 Microcode\n");
-+ fw_name = FIRMWARE_R200;
-+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
-+ DRM_INFO("Loading R300 Microcode\n");
-+ fw_name = FIRMWARE_R300;
-+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
-+ DRM_INFO("Loading R400 Microcode\n");
-+ fw_name = FIRMWARE_R420;
-+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
-+ DRM_INFO("Loading RS690/RS740 Microcode\n");
-+ fw_name = FIRMWARE_RS690;
-+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
-+ DRM_INFO("Loading RS600 Microcode\n");
-+ fw_name = FIRMWARE_RS600;
-+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
-+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
-+ DRM_INFO("Loading R500 Microcode\n");
-+ fw_name = FIRMWARE_R520;
-+ }
-+
-+ err = reject_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
-+ platform_device_unregister(pdev);
-+ if (err) {
-+ printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
-+ fw_name);
-+ } else if (dev_priv->me_fw->size % 8) {
-+ printk(KERN_ERR
-+ "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
-+ dev_priv->me_fw->size, fw_name);
-+ err = -EINVAL;
-+ release_firmware(dev_priv->me_fw);
-+ dev_priv->me_fw = NULL;
-+ }
-+ return err;
-+}
-+
-+static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv)
-+{
-+ const __be32 *fw_data;
-+ int i, size;
-+
-+ radeon_do_wait_for_idle(dev_priv);
-+
-+ if (dev_priv->me_fw) {
-+ size = dev_priv->me_fw->size / 4;
-+ fw_data = (const __be32 *)&dev_priv->me_fw->data[0];
-+ RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
-+ for (i = 0; i < size; i += 2) {
-+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
-+ be32_to_cpup(&fw_data[i]));
-+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
-+ be32_to_cpup(&fw_data[i + 1]));
-+ }
-+ }
-+}
-
- /* Flush any pending commands to the CP. This should only be used just
- * prior to a wait for idle, as it informs the engine that the command
-@@ -1495,6 +1517,14 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
- radeon_set_pcigart(dev_priv, 1);
- }
-
-+ if (!dev_priv->me_fw) {
-+ int err = radeon_cp_init_microcode(dev_priv);
-+ if (err) {
-+ DRM_ERROR("Failed to load firmware!\n");
-+ radeon_do_cleanup_cp(dev);
-+ return err;
-+ }
-+ }
- radeon_cp_load_microcode(dev_priv);
- radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
-
-@@ -1764,6 +1794,14 @@ void radeon_do_release(struct drm_device * dev)
- r600_do_cleanup_cp(dev);
- else
- radeon_do_cleanup_cp(dev);
-+ if (dev_priv->me_fw) {
-+ release_firmware(dev_priv->me_fw);
-+ dev_priv->me_fw = NULL;
-+ }
-+ if (dev_priv->pfp_fw) {
-+ release_firmware(dev_priv->pfp_fw);
-+ dev_priv->pfp_fw = NULL;
-+ }
- }
- }
-
-diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index 7693f7c..f2469c5 100644
---- a/drivers/gpu/drm/radeon/radeon_device.c
-+++ b/drivers/gpu/drm/radeon/radeon_device.c
-@@ -37,7 +37,7 @@
- /*
- * Clear GPU surface registers.
- */
--static void radeon_surface_init(struct radeon_device *rdev)
-+void radeon_surface_init(struct radeon_device *rdev)
- {
- /* FIXME: check this out */
- if (rdev->family < CHIP_R600) {
-@@ -56,7 +56,7 @@ static void radeon_surface_init(struct radeon_device *rdev)
- /*
- * GPU scratch registers helpers function.
- */
--static void radeon_scratch_init(struct radeon_device *rdev)
-+void radeon_scratch_init(struct radeon_device *rdev)
- {
- int i;
-
-@@ -156,16 +156,14 @@ int radeon_mc_setup(struct radeon_device *rdev)
- tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
- rdev->mc.gtt_location = tmp;
- }
-- DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20);
-+ DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
- DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
-- rdev->mc.vram_location,
-- rdev->mc.vram_location + rdev->mc.mc_vram_size - 1);
-- if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size)
-- DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
-- DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
-+ (unsigned)rdev->mc.vram_location,
-+ (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
-+ DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
- DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
-- rdev->mc.gtt_location,
-- rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
-+ (unsigned)rdev->mc.gtt_location,
-+ (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
- return 0;
- }
-
-@@ -205,6 +203,31 @@ static bool radeon_card_posted(struct radeon_device *rdev)
-
- }
-
-+int radeon_dummy_page_init(struct radeon_device *rdev)
-+{
-+ rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
-+ if (rdev->dummy_page.page == NULL)
-+ return -ENOMEM;
-+ rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
-+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-+ if (!rdev->dummy_page.addr) {
-+ __free_page(rdev->dummy_page.page);
-+ rdev->dummy_page.page = NULL;
-+ return -ENOMEM;
-+ }
-+ return 0;
-+}
-+
-+void radeon_dummy_page_fini(struct radeon_device *rdev)
-+{
-+ if (rdev->dummy_page.page == NULL)
-+ return;
-+ pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
-+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-+ __free_page(rdev->dummy_page.page);
-+ rdev->dummy_page.page = NULL;
-+}
-+
-
- /*
- * Registers accessors functions.
-@@ -323,9 +346,15 @@ int radeon_asic_init(struct radeon_device *rdev)
- case CHIP_RV635:
- case CHIP_RV670:
- case CHIP_RS780:
-+ case CHIP_RS880:
-+ rdev->asic = &r600_asic;
-+ break;
- case CHIP_RV770:
- case CHIP_RV730:
- case CHIP_RV710:
-+ case CHIP_RV740:
-+ rdev->asic = &rv770_asic;
-+ break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
-@@ -448,7 +477,7 @@ int radeon_device_init(struct radeon_device *rdev,
- struct pci_dev *pdev,
- uint32_t flags)
- {
-- int r, ret;
-+ int r, ret = 0;
- int dma_bits;
-
- DRM_INFO("radeon: Initializing kernel modesetting.\n");
-@@ -487,10 +516,6 @@ int radeon_device_init(struct radeon_device *rdev,
- if (r) {
- return r;
- }
-- r = radeon_init(rdev);
-- if (r) {
-- return r;
-- }
-
- /* set DMA mask + need_dma32 flags.
- * PCIE - can handle 40-bits.
-@@ -521,111 +546,118 @@ int radeon_device_init(struct radeon_device *rdev,
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
-
-- /* Setup errata flags */
-- radeon_errata(rdev);
-- /* Initialize scratch registers */
-- radeon_scratch_init(rdev);
-- /* Initialize surface registers */
-- radeon_surface_init(rdev);
--
-- /* TODO: disable VGA need to use VGA request */
-- /* BIOS*/
-- if (!radeon_get_bios(rdev)) {
-- if (ASIC_IS_AVIVO(rdev))
-- return -EINVAL;
-- }
-- if (rdev->is_atom_bios) {
-- r = radeon_atombios_init(rdev);
-+ rdev->new_init_path = false;
-+ r = radeon_init(rdev);
-+ if (r) {
-+ return r;
-+ }
-+ if (!rdev->new_init_path) {
-+ /* Setup errata flags */
-+ radeon_errata(rdev);
-+ /* Initialize scratch registers */
-+ radeon_scratch_init(rdev);
-+ /* Initialize surface registers */
-+ radeon_surface_init(rdev);
-+
-+ /* TODO: disable VGA need to use VGA request */
-+ /* BIOS*/
-+ if (!radeon_get_bios(rdev)) {
-+ if (ASIC_IS_AVIVO(rdev))
-+ return -EINVAL;
-+ }
-+ if (rdev->is_atom_bios) {
-+ r = radeon_atombios_init(rdev);
-+ if (r) {
-+ return r;
-+ }
-+ } else {
-+ r = radeon_combios_init(rdev);
-+ if (r) {
-+ return r;
-+ }
-+ }
-+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-+ if (radeon_gpu_reset(rdev)) {
-+ /* FIXME: what do we want to do here ? */
-+ }
-+ /* check if cards are posted or not */
-+ if (!radeon_card_posted(rdev) && rdev->bios) {
-+ DRM_INFO("GPU not posted. posting now...\n");
-+ if (rdev->is_atom_bios) {
-+ atom_asic_init(rdev->mode_info.atom_context);
-+ } else {
-+ radeon_combios_asic_init(rdev->ddev);
-+ }
-+ }
-+ /* Initialize clocks */
-+ r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
-- } else {
-- r = radeon_combios_init(rdev);
-+ /* Get vram informations */
-+ radeon_vram_info(rdev);
-+
-+ /* Add an MTRR for the VRAM */
-+ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
-+ MTRR_TYPE_WRCOMB, 1);
-+ DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
-+ (unsigned)(rdev->mc.mc_vram_size >> 20),
-+ (unsigned)(rdev->mc.aper_size >> 20));
-+ DRM_INFO("RAM width %dbits %cDR\n",
-+ rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
-+ /* Initialize memory controller (also test AGP) */
-+ r = radeon_mc_init(rdev);
- if (r) {
- return r;
- }
-- }
-- /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-- if (radeon_gpu_reset(rdev)) {
-- /* FIXME: what do we want to do here ? */
-- }
-- /* check if cards are posted or not */
-- if (!radeon_card_posted(rdev) && rdev->bios) {
-- DRM_INFO("GPU not posted. posting now...\n");
-- if (rdev->is_atom_bios) {
-- atom_asic_init(rdev->mode_info.atom_context);
-- } else {
-- radeon_combios_asic_init(rdev->ddev);
-- }
-- }
-- /* Initialize clocks */
-- r = radeon_clocks_init(rdev);
-- if (r) {
-- return r;
-- }
-- /* Get vram informations */
-- radeon_vram_info(rdev);
--
-- /* Add an MTRR for the VRAM */
-- rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
-- MTRR_TYPE_WRCOMB, 1);
-- DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
-- rdev->mc.real_vram_size >> 20,
-- (unsigned)rdev->mc.aper_size >> 20);
-- DRM_INFO("RAM width %dbits %cDR\n",
-- rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
-- /* Initialize memory controller (also test AGP) */
-- r = radeon_mc_init(rdev);
-- if (r) {
-- return r;
-- }
-- /* Fence driver */
-- r = radeon_fence_driver_init(rdev);
-- if (r) {
-- return r;
-- }
-- r = radeon_irq_kms_init(rdev);
-- if (r) {
-- return r;
-- }
-- /* Memory manager */
-- r = radeon_object_init(rdev);
-- if (r) {
-- return r;
-- }
-- /* Initialize GART (initialize after TTM so we can allocate
-- * memory through TTM but finalize after TTM) */
-- r = radeon_gart_enable(rdev);
-- if (!r) {
-- r = radeon_gem_init(rdev);
-- }
--
-- /* 1M ring buffer */
-- if (!r) {
-- r = radeon_cp_init(rdev, 1024 * 1024);
-- }
-- if (!r) {
-- r = radeon_wb_init(rdev);
-+ /* Fence driver */
-+ r = radeon_fence_driver_init(rdev);
- if (r) {
-- DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
- return r;
- }
-- }
-- if (!r) {
-- r = radeon_ib_pool_init(rdev);
-+ r = radeon_irq_kms_init(rdev);
- if (r) {
-- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
- return r;
- }
-- }
-- if (!r) {
-- r = radeon_ib_test(rdev);
-+ /* Memory manager */
-+ r = radeon_object_init(rdev);
- if (r) {
-- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
- return r;
- }
-+ /* Initialize GART (initialize after TTM so we can allocate
-+ * memory through TTM but finalize after TTM) */
-+ r = radeon_gart_enable(rdev);
-+ if (!r) {
-+ r = radeon_gem_init(rdev);
-+ }
-+
-+ /* 1M ring buffer */
-+ if (!r) {
-+ r = radeon_cp_init(rdev, 1024 * 1024);
-+ }
-+ if (!r) {
-+ r = radeon_wb_init(rdev);
-+ if (r) {
-+ DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
-+ return r;
-+ }
-+ }
-+ if (!r) {
-+ r = radeon_ib_pool_init(rdev);
-+ if (r) {
-+ DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
-+ return r;
-+ }
-+ }
-+ if (!r) {
-+ r = radeon_ib_test(rdev);
-+ if (r) {
-+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
-+ return r;
-+ }
-+ }
-+ ret = r;
- }
-- ret = r;
- r = radeon_modeset_init(rdev);
- if (r) {
- return r;
-@@ -651,26 +683,29 @@ void radeon_device_fini(struct radeon_device *rdev)
- rdev->shutdown = true;
- /* Order matter so becarefull if you rearrange anythings */
- radeon_modeset_fini(rdev);
-- radeon_ib_pool_fini(rdev);
-- radeon_cp_fini(rdev);
-- radeon_wb_fini(rdev);
-- radeon_gem_fini(rdev);
-- radeon_object_fini(rdev);
-- /* mc_fini must be after object_fini */
-- radeon_mc_fini(rdev);
-+ if (!rdev->new_init_path) {
-+ radeon_ib_pool_fini(rdev);
-+ radeon_cp_fini(rdev);
-+ radeon_wb_fini(rdev);
-+ radeon_gem_fini(rdev);
-+ radeon_mc_fini(rdev);
- #if __OS_HAS_AGP
-- radeon_agp_fini(rdev);
-+ radeon_agp_fini(rdev);
- #endif
-- radeon_irq_kms_fini(rdev);
-- radeon_fence_driver_fini(rdev);
-- radeon_clocks_fini(rdev);
-- if (rdev->is_atom_bios) {
-- radeon_atombios_fini(rdev);
-+ radeon_irq_kms_fini(rdev);
-+ radeon_fence_driver_fini(rdev);
-+ radeon_clocks_fini(rdev);
-+ radeon_object_fini(rdev);
-+ if (rdev->is_atom_bios) {
-+ radeon_atombios_fini(rdev);
-+ } else {
-+ radeon_combios_fini(rdev);
-+ }
-+ kfree(rdev->bios);
-+ rdev->bios = NULL;
- } else {
-- radeon_combios_fini(rdev);
-+ radeon_fini(rdev);
- }
-- kfree(rdev->bios);
-- rdev->bios = NULL;
- iounmap(rdev->rmmio);
- rdev->rmmio = NULL;
- }
-@@ -708,9 +743,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
- /* wait for gpu to finish processing current batch */
- radeon_fence_wait_last(rdev);
-
-- radeon_cp_disable(rdev);
-- radeon_gart_disable(rdev);
--
-+ if (!rdev->new_init_path) {
-+ radeon_cp_disable(rdev);
-+ radeon_gart_disable(rdev);
-+ } else {
-+ radeon_suspend(rdev);
-+ }
- /* evict remaining vram memory */
- radeon_object_evict_vram(rdev);
-
-@@ -746,33 +784,37 @@ int radeon_resume_kms(struct drm_device *dev)
- if (radeon_gpu_reset(rdev)) {
- /* FIXME: what do we want to do here ? */
- }
-- /* post card */
-- if (rdev->is_atom_bios) {
-- atom_asic_init(rdev->mode_info.atom_context);
-+ if (!rdev->new_init_path) {
-+ /* post card */
-+ if (rdev->is_atom_bios) {
-+ atom_asic_init(rdev->mode_info.atom_context);
-+ } else {
-+ radeon_combios_asic_init(rdev->ddev);
-+ }
-+ /* Initialize clocks */
-+ r = radeon_clocks_init(rdev);
-+ if (r) {
-+ release_console_sem();
-+ return r;
-+ }
-+ /* Enable IRQ */
-+ rdev->irq.sw_int = true;
-+ radeon_irq_set(rdev);
-+ /* Initialize GPU Memory Controller */
-+ r = radeon_mc_init(rdev);
-+ if (r) {
-+ goto out;
-+ }
-+ r = radeon_gart_enable(rdev);
-+ if (r) {
-+ goto out;
-+ }
-+ r = radeon_cp_init(rdev, rdev->cp.ring_size);
-+ if (r) {
-+ goto out;
-+ }
- } else {
-- radeon_combios_asic_init(rdev->ddev);
-- }
-- /* Initialize clocks */
-- r = radeon_clocks_init(rdev);
-- if (r) {
-- release_console_sem();
-- return r;
-- }
-- /* Enable IRQ */
-- rdev->irq.sw_int = true;
-- radeon_irq_set(rdev);
-- /* Initialize GPU Memory Controller */
-- r = radeon_mc_init(rdev);
-- if (r) {
-- goto out;
-- }
-- r = radeon_gart_enable(rdev);
-- if (r) {
-- goto out;
-- }
-- r = radeon_cp_init(rdev, rdev->cp.ring_size);
-- if (r) {
-- goto out;
-+ radeon_resume(rdev);
- }
- out:
- fb_set_suspend(rdev->fbdev_info, 0);
-diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
-index a8fa1bb..9d817a6 100644
---- a/drivers/gpu/drm/radeon/radeon_display.c
-+++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -158,9 +158,6 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
- {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
-- if (radeon_crtc->mode_set.mode) {
-- drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode);
-- }
- drm_crtc_cleanup(crtc);
- kfree(radeon_crtc);
- }
-@@ -189,9 +186,11 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
- radeon_crtc->crtc_id = index;
- rdev->mode_info.crtcs[index] = radeon_crtc;
-
-+#if 0
- radeon_crtc->mode_set.crtc = &radeon_crtc->base;
- radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
- radeon_crtc->mode_set.num_connectors = 0;
-+#endif
-
- for (i = 0; i < 256; i++) {
- radeon_crtc->lut_r[i] = i << 2;
-@@ -313,7 +312,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
- }
- }
-
--bool radeon_setup_enc_conn(struct drm_device *dev)
-+static bool radeon_setup_enc_conn(struct drm_device *dev)
- {
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *drm_connector;
-@@ -347,9 +346,13 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
-
- if (!radeon_connector->ddc_bus)
- return -1;
-- radeon_i2c_do_lock(radeon_connector, 1);
-- edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-- radeon_i2c_do_lock(radeon_connector, 0);
-+ if (!radeon_connector->edid) {
-+ radeon_i2c_do_lock(radeon_connector, 1);
-+ edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-+ radeon_i2c_do_lock(radeon_connector, 0);
-+ } else
-+ edid = radeon_connector->edid;
-+
- if (edid) {
- /* update digital bits here */
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
-@@ -678,7 +681,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- continue;
- if (first) {
- radeon_crtc->rmx_type = radeon_encoder->rmx_type;
-- radeon_crtc->devices = radeon_encoder->devices;
- memcpy(&radeon_crtc->native_mode,
- &radeon_encoder->native_mode,
- sizeof(struct radeon_native_mode));
-diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
-index 0bd5879..29f040a 100644
---- a/drivers/gpu/drm/radeon/radeon_drv.c
-+++ b/drivers/gpu/drm/radeon/radeon_drv.c
-@@ -38,7 +38,6 @@
- #include <linux/console.h>
-
-
--#if defined(CONFIG_DRM_RADEON_KMS)
- /*
- * KMS wrapper.
- */
-@@ -77,11 +76,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
- int radeon_debugfs_init(struct drm_minor *minor);
- void radeon_debugfs_cleanup(struct drm_minor *minor);
- #endif
--#endif
-
-
- int radeon_no_wb;
--#if defined(CONFIG_DRM_RADEON_KMS)
- int radeon_modeset = -1;
- int radeon_dynclks = -1;
- int radeon_r4xx_atom = 0;
-@@ -91,12 +88,11 @@ int radeon_gart_size = 512; /* default gart size */
- int radeon_benchmarking = 0;
- int radeon_testing = 0;
- int radeon_connector_table = 0;
--#endif
-+int radeon_tv = 1;
-
- MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
- module_param_named(no_wb, radeon_no_wb, int, 0444);
-
--#if defined(CONFIG_DRM_RADEON_KMS)
- MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
- module_param_named(modeset, radeon_modeset, int, 0400);
-
-@@ -123,7 +119,9 @@ module_param_named(test, radeon_testing, int, 0444);
-
- MODULE_PARM_DESC(connector_table, "Force connector table");
- module_param_named(connector_table, radeon_connector_table, int, 0444);
--#endif
-+
-+MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
-+module_param_named(tv, radeon_tv, int, 0444);
-
- static int radeon_suspend(struct drm_device *dev, pm_message_t state)
- {
-@@ -215,7 +213,6 @@ static struct drm_driver driver_old = {
- .patchlevel = DRIVER_PATCHLEVEL,
- };
-
--#if defined(CONFIG_DRM_RADEON_KMS)
- static struct drm_driver kms_driver;
-
- static int __devinit
-@@ -309,7 +306,6 @@ static struct drm_driver kms_driver = {
- .minor = KMS_DRIVER_MINOR,
- .patchlevel = KMS_DRIVER_PATCHLEVEL,
- };
--#endif
-
- static struct drm_driver *driver;
-
-@@ -317,7 +313,6 @@ static int __init radeon_init(void)
- {
- driver = &driver_old;
- driver->num_ioctls = radeon_max_ioctl;
--#if defined(CONFIG_DRM_RADEON_KMS)
- #ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && radeon_modeset == -1) {
- DRM_INFO("VGACON disable radeon kernel modesetting.\n");
-@@ -328,8 +323,13 @@ static int __init radeon_init(void)
- #endif
- /* if enabled by default */
- if (radeon_modeset == -1) {
-- DRM_INFO("radeon default to kernel modesetting.\n");
-+#ifdef CONFIG_DRM_RADEON_KMS
-+ DRM_INFO("radeon defaulting to kernel modesetting.\n");
- radeon_modeset = 1;
-+#else
-+ DRM_INFO("radeon defaulting to userspace modesetting.\n");
-+ radeon_modeset = 0;
-+#endif
- }
- if (radeon_modeset == 1) {
- DRM_INFO("radeon kernel modesetting enabled.\n");
-@@ -339,7 +339,6 @@ static int __init radeon_init(void)
- }
- /* if the vga console setting is enabled still
- * let modprobe override it */
--#endif
- return drm_init(driver);
- }
-
-diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
-index 6fa32da..c7b1859 100644
---- a/drivers/gpu/drm/radeon/radeon_drv.h
-+++ b/drivers/gpu/drm/radeon/radeon_drv.h
-@@ -31,6 +31,9 @@
- #ifndef __RADEON_DRV_H__
- #define __RADEON_DRV_H__
-
-+#include <linux/firmware.h>
-+#include <linux/platform_device.h>
-+
- /* General customization:
- */
-
-@@ -353,6 +356,14 @@ typedef struct drm_radeon_private {
- int r700_sc_hiz_tile_fifo_size;
- int r700_sc_earlyz_tile_fifo_fize;
-
-+ struct mutex cs_mutex;
-+ u32 cs_id_scnt;
-+ u32 cs_id_wcnt;
-+ /* r6xx/r7xx drm blit vertex buffer */
-+ struct drm_buf *blit_vb;
-+
-+ /* firmware */
-+ const struct firmware *me_fw, *pfp_fw;
- } drm_radeon_private_t;
-
- typedef struct drm_radeon_buf_priv {
-@@ -391,6 +402,9 @@ static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
- (off >= gart_start && off <= gart_end));
- }
-
-+/* radeon_state.c */
-+extern void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf);
-+
- /* radeon_cp.c */
- extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
- extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
-@@ -482,6 +496,22 @@ extern int r600_cp_dispatch_indirect(struct drm_device *dev,
- struct drm_buf *buf, int start, int end);
- extern int r600_page_table_init(struct drm_device *dev);
- extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
-+extern int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
-+extern void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv);
-+extern int r600_cp_dispatch_texture(struct drm_device *dev,
-+ struct drm_file *file_priv,
-+ drm_radeon_texture_t *tex,
-+ drm_radeon_tex_image_t *image);
-+/* r600_blit.c */
-+extern int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv);
-+extern void r600_done_blit_copy(struct drm_device *dev);
-+extern void r600_blit_copy(struct drm_device *dev,
-+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
-+ int size_bytes);
-+extern void r600_blit_swap(struct drm_device *dev,
-+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
-+ int sx, int sy, int dx, int dy,
-+ int w, int h, int src_pitch, int dst_pitch, int cpp);
-
- /* Flags for stats.boxes
- */
-@@ -1109,13 +1139,71 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
- # define RADEON_CNTL_BITBLT_MULTI 0x00009B00
- # define RADEON_CNTL_SET_SCISSORS 0xC0001E00
-
--# define R600_IT_INDIRECT_BUFFER 0x00003200
--# define R600_IT_ME_INITIALIZE 0x00004400
-+# define R600_IT_INDIRECT_BUFFER_END 0x00001700
-+# define R600_IT_SET_PREDICATION 0x00002000
-+# define R600_IT_REG_RMW 0x00002100
-+# define R600_IT_COND_EXEC 0x00002200
-+# define R600_IT_PRED_EXEC 0x00002300
-+# define R600_IT_START_3D_CMDBUF 0x00002400
-+# define R600_IT_DRAW_INDEX_2 0x00002700
-+# define R600_IT_CONTEXT_CONTROL 0x00002800
-+# define R600_IT_DRAW_INDEX_IMMD_BE 0x00002900
-+# define R600_IT_INDEX_TYPE 0x00002A00
-+# define R600_IT_DRAW_INDEX 0x00002B00
-+# define R600_IT_DRAW_INDEX_AUTO 0x00002D00
-+# define R600_IT_DRAW_INDEX_IMMD 0x00002E00
-+# define R600_IT_NUM_INSTANCES 0x00002F00
-+# define R600_IT_STRMOUT_BUFFER_UPDATE 0x00003400
-+# define R600_IT_INDIRECT_BUFFER_MP 0x00003800
-+# define R600_IT_MEM_SEMAPHORE 0x00003900
-+# define R600_IT_MPEG_INDEX 0x00003A00
-+# define R600_IT_WAIT_REG_MEM 0x00003C00
-+# define R600_IT_MEM_WRITE 0x00003D00
-+# define R600_IT_INDIRECT_BUFFER 0x00003200
-+# define R600_IT_CP_INTERRUPT 0x00004000
-+# define R600_IT_SURFACE_SYNC 0x00004300
-+# define R600_CB0_DEST_BASE_ENA (1 << 6)
-+# define R600_TC_ACTION_ENA (1 << 23)
-+# define R600_VC_ACTION_ENA (1 << 24)
-+# define R600_CB_ACTION_ENA (1 << 25)
-+# define R600_DB_ACTION_ENA (1 << 26)
-+# define R600_SH_ACTION_ENA (1 << 27)
-+# define R600_SMX_ACTION_ENA (1 << 28)
-+# define R600_IT_ME_INITIALIZE 0x00004400
- # define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
--# define R600_IT_EVENT_WRITE 0x00004600
--# define R600_IT_SET_CONFIG_REG 0x00006800
--# define R600_SET_CONFIG_REG_OFFSET 0x00008000
--# define R600_SET_CONFIG_REG_END 0x0000ac00
-+# define R600_IT_COND_WRITE 0x00004500
-+# define R600_IT_EVENT_WRITE 0x00004600
-+# define R600_IT_EVENT_WRITE_EOP 0x00004700
-+# define R600_IT_ONE_REG_WRITE 0x00005700
-+# define R600_IT_SET_CONFIG_REG 0x00006800
-+# define R600_SET_CONFIG_REG_OFFSET 0x00008000
-+# define R600_SET_CONFIG_REG_END 0x0000ac00
-+# define R600_IT_SET_CONTEXT_REG 0x00006900
-+# define R600_SET_CONTEXT_REG_OFFSET 0x00028000
-+# define R600_SET_CONTEXT_REG_END 0x00029000
-+# define R600_IT_SET_ALU_CONST 0x00006A00
-+# define R600_SET_ALU_CONST_OFFSET 0x00030000
-+# define R600_SET_ALU_CONST_END 0x00032000
-+# define R600_IT_SET_BOOL_CONST 0x00006B00
-+# define R600_SET_BOOL_CONST_OFFSET 0x0003e380
-+# define R600_SET_BOOL_CONST_END 0x00040000
-+# define R600_IT_SET_LOOP_CONST 0x00006C00
-+# define R600_SET_LOOP_CONST_OFFSET 0x0003e200
-+# define R600_SET_LOOP_CONST_END 0x0003e380
-+# define R600_IT_SET_RESOURCE 0x00006D00
-+# define R600_SET_RESOURCE_OFFSET 0x00038000
-+# define R600_SET_RESOURCE_END 0x0003c000
-+# define R600_SQ_TEX_VTX_INVALID_TEXTURE 0x0
-+# define R600_SQ_TEX_VTX_INVALID_BUFFER 0x1
-+# define R600_SQ_TEX_VTX_VALID_TEXTURE 0x2
-+# define R600_SQ_TEX_VTX_VALID_BUFFER 0x3
-+# define R600_IT_SET_SAMPLER 0x00006E00
-+# define R600_SET_SAMPLER_OFFSET 0x0003c000
-+# define R600_SET_SAMPLER_END 0x0003cff0
-+# define R600_IT_SET_CTL_CONST 0x00006F00
-+# define R600_SET_CTL_CONST_OFFSET 0x0003cff0
-+# define R600_SET_CTL_CONST_END 0x0003e200
-+# define R600_IT_SURFACE_BASE_UPDATE 0x00007300
-
- #define RADEON_CP_PACKET_MASK 0xC0000000
- #define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
-@@ -1593,6 +1681,52 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
- #define R600_CB_COLOR7_BASE 0x2805c
- #define R600_CB_COLOR7_FRAG 0x280fc
-
-+#define R600_CB_COLOR0_SIZE 0x28060
-+#define R600_CB_COLOR0_VIEW 0x28080
-+#define R600_CB_COLOR0_INFO 0x280a0
-+#define R600_CB_COLOR0_TILE 0x280c0
-+#define R600_CB_COLOR0_FRAG 0x280e0
-+#define R600_CB_COLOR0_MASK 0x28100
-+
-+#define AVIVO_D1MODE_VLINE_START_END 0x6538
-+#define AVIVO_D2MODE_VLINE_START_END 0x6d38
-+#define R600_CP_COHER_BASE 0x85f8
-+#define R600_DB_DEPTH_BASE 0x2800c
-+#define R600_SQ_PGM_START_FS 0x28894
-+#define R600_SQ_PGM_START_ES 0x28880
-+#define R600_SQ_PGM_START_VS 0x28858
-+#define R600_SQ_PGM_RESOURCES_VS 0x28868
-+#define R600_SQ_PGM_CF_OFFSET_VS 0x288d0
-+#define R600_SQ_PGM_START_GS 0x2886c
-+#define R600_SQ_PGM_START_PS 0x28840
-+#define R600_SQ_PGM_RESOURCES_PS 0x28850
-+#define R600_SQ_PGM_EXPORTS_PS 0x28854
-+#define R600_SQ_PGM_CF_OFFSET_PS 0x288cc
-+#define R600_VGT_DMA_BASE 0x287e8
-+#define R600_VGT_DMA_BASE_HI 0x287e4
-+#define R600_VGT_STRMOUT_BASE_OFFSET_0 0x28b10
-+#define R600_VGT_STRMOUT_BASE_OFFSET_1 0x28b14
-+#define R600_VGT_STRMOUT_BASE_OFFSET_2 0x28b18
-+#define R600_VGT_STRMOUT_BASE_OFFSET_3 0x28b1c
-+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_0 0x28b44
-+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_1 0x28b48
-+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_2 0x28b4c
-+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_3 0x28b50
-+#define R600_VGT_STRMOUT_BUFFER_BASE_0 0x28ad8
-+#define R600_VGT_STRMOUT_BUFFER_BASE_1 0x28ae8
-+#define R600_VGT_STRMOUT_BUFFER_BASE_2 0x28af8
-+#define R600_VGT_STRMOUT_BUFFER_BASE_3 0x28b08
-+#define R600_VGT_STRMOUT_BUFFER_OFFSET_0 0x28adc
-+#define R600_VGT_STRMOUT_BUFFER_OFFSET_1 0x28aec
-+#define R600_VGT_STRMOUT_BUFFER_OFFSET_2 0x28afc
-+#define R600_VGT_STRMOUT_BUFFER_OFFSET_3 0x28b0c
-+
-+#define R600_VGT_PRIMITIVE_TYPE 0x8958
-+
-+#define R600_PA_SC_SCREEN_SCISSOR_TL 0x28030
-+#define R600_PA_SC_GENERIC_SCISSOR_TL 0x28240
-+#define R600_PA_SC_WINDOW_SCISSOR_TL 0x28204
-+
- #define R600_TC_CNTL 0x9608
- # define R600_TC_L2_SIZE(x) ((x) << 5)
- # define R600_L2_DISABLE_LATE_HIT (1 << 9)
-diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
-index 0a92706..9ad2035 100644
---- a/drivers/gpu/drm/radeon/radeon_encoders.c
-+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
-@@ -126,6 +126,23 @@ radeon_link_encoder_connector(struct drm_device *dev)
- }
- }
-
-+void radeon_encoder_set_active_device(struct drm_encoder *encoder)
-+{
-+ struct drm_device *dev = encoder->dev;
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct drm_connector *connector;
-+
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ if (connector->encoder == encoder) {
-+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-+ radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
-+ DRM_DEBUG("setting active device to %08x from %08x %08x for encoder %d\n",
-+ radeon_encoder->active_device, radeon_encoder->devices,
-+ radeon_connector->devices, encoder->encoder_type);
-+ }
-+ }
-+}
-+
- static struct drm_connector *
- radeon_get_connector_for_encoder(struct drm_encoder *encoder)
- {
-@@ -244,9 +261,9 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
-
- args.ucAction = action;
-
-- if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
- args.ucDacStandard = ATOM_DAC1_PS2;
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.ucDacStandard = ATOM_DAC1_CV;
- else {
- switch (tv_std) {
-@@ -288,7 +305,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
-
- args.sTVEncoder.ucAction = action;
-
-- if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
- else {
- switch (tv_std) {
-@@ -825,10 +842,10 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
-
- /* XXX: fix up scratch reg handling */
- temp = RREG32(reg);
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- WREG32(reg, (ATOM_S3_TV1_ACTIVE |
- (radeon_crtc->crtc_id << 18)));
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
- else
- WREG32(reg, 0);
-@@ -851,9 +868,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
- DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
- int index = 0;
- bool is_dig = false;
-+ int devices;
-
- memset(&args, 0, sizeof(args));
-
-+ /* on DPMS off we have no idea if active device is meaningful */
-+ if (mode != DRM_MODE_DPMS_ON && !radeon_encoder->active_device)
-+ devices = radeon_encoder->devices;
-+ else
-+ devices = radeon_encoder->active_device;
-+
-+ DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
-+ radeon_encoder->encoder_id, mode, radeon_encoder->devices,
-+ radeon_encoder->active_device);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-@@ -881,18 +908,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
-+ if (devices & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (devices & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
-+ if (devices & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (devices & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
-@@ -979,18 +1006,18 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
- else
- args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
-@@ -1019,17 +1046,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
-- else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
-+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
-@@ -1097,7 +1124,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
- atombios_set_encoder_crtc_source(encoder);
-
- if (ASIC_IS_AVIVO(rdev)) {
-- if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
- atombios_yuv_setup(encoder, true);
- else
- atombios_yuv_setup(encoder, false);
-@@ -1135,7 +1162,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
- case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- atombios_dac_setup(encoder, ATOM_ENABLE);
-- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_ENABLE);
- break;
- }
-@@ -1143,11 +1170,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
- }
-
- static bool
--atombios_dac_load_detect(struct drm_encoder *encoder)
-+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
- {
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
- ATOM_DEVICE_CV_SUPPORT |
-@@ -1168,15 +1196,15 @@ atombios_dac_load_detect(struct drm_encoder *encoder)
- else
- args.sDacload.ucDacType = ATOM_DAC_B;
-
-- if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT)
-+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
-- else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT)
-+ else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
-- else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
-+ else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
- if (crev >= 3)
- args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
-- } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
-+ } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
- if (crev >= 3)
- args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
-@@ -1195,9 +1223,10 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- uint32_t bios_0_scratch;
-
-- if (!atombios_dac_load_detect(encoder)) {
-+ if (!atombios_dac_load_detect(encoder, connector)) {
- DRM_DEBUG("detect returned false \n");
- return connector_status_unknown;
- }
-@@ -1207,17 +1236,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
- else
- bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
-
-- DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch);
-- if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
-+ DRM_DEBUG("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
-+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT1_MASK)
- return connector_status_connected;
-- } else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
-+ }
-+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
- if (bios_0_scratch & ATOM_S0_CRT2_MASK)
- return connector_status_connected;
-- } else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
-+ }
-+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
- return connector_status_connected;
-- } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
-+ }
-+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
- if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
- return connector_status_connected; /* CTV */
- else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
-@@ -1230,6 +1262,8 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
- {
- radeon_atom_output_lock(encoder, true);
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-+
-+ radeon_encoder_set_active_device(encoder);
- }
-
- static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
-@@ -1238,12 +1272,20 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
- radeon_atom_output_lock(encoder, false);
- }
-
-+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
-+{
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-+ radeon_encoder->active_device = 0;
-+}
-+
- static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
- .dpms = radeon_atom_encoder_dpms,
- .mode_fixup = radeon_atom_mode_fixup,
- .prepare = radeon_atom_encoder_prepare,
- .mode_set = radeon_atom_encoder_mode_set,
- .commit = radeon_atom_encoder_commit,
-+ .disable = radeon_atom_encoder_disable,
- /* no detect for TMDS/LVDS yet */
- };
-
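/*
 * A minimal sketch of the prepare/disable contract the hunks above set up
 * (illustrative only, reusing helpers already named in this diff): .prepare
 * latches which device the encoder is about to drive, .disable clears it
 * again, and the dpms code at the top of this file falls back to the static
 * ->devices mask whenever active_device is zero.
 */
static void example_encoder_prepare(struct drm_encoder *encoder)
{
	radeon_atom_output_lock(encoder, true);
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
	radeon_encoder_set_active_device(encoder);	/* latch what we will drive */
}

static void example_encoder_disable(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
	radeon_encoder->active_device = 0;	/* dpms/detect fall back to ->devices */
}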
-@@ -1268,6 +1310,18 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
- .destroy = radeon_enc_destroy,
- };
-
-+struct radeon_encoder_atom_dac *
-+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
-+{
-+ struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
-+
-+ if (!dac)
-+ return NULL;
-+
-+ dac->tv_std = TV_STD_NTSC;
-+ return dac;
-+}
-+
- struct radeon_encoder_atom_dig *
- radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
- {
-@@ -1336,6 +1390,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
-+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
- drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-@@ -1345,8 +1400,14 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
-- radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
-+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-+ radeon_encoder->rmx_type = RMX_FULL;
-+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
-+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
-+ } else {
-+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
-+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
-+ }
- drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
- break;
- }
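/*
 * The last hunk above picks the DRM encoder type from the BIOS device mask
 * instead of always registering a TMDS encoder.  The same decision written
 * as a helper (hypothetical name, identical logic):
 */
static int example_pick_dig_encoder_type(struct radeon_encoder *radeon_encoder)
{
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) {
		radeon_encoder->rmx_type = RMX_FULL;	/* panels get full RMX scaling */
		return DRM_MODE_ENCODER_LVDS;
	}
	return DRM_MODE_ENCODER_TMDS;
}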
-diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
-index ec383ed..ebb5895 100644
---- a/drivers/gpu/drm/radeon/radeon_fb.c
-+++ b/drivers/gpu/drm/radeon/radeon_fb.c
-@@ -28,15 +28,7 @@
- */
-
- #include <linux/module.h>
--#include <linux/kernel.h>
--#include <linux/errno.h>
--#include <linux/string.h>
--#include <linux/mm.h>
--#include <linux/tty.h>
--#include <linux/slab.h>
--#include <linux/delay.h>
- #include <linux/fb.h>
--#include <linux/init.h>
-
- #include "drmP.h"
- #include "drm.h"
-@@ -45,375 +37,86 @@
- #include "radeon_drm.h"
- #include "radeon.h"
-
-+#include "drm_fb_helper.h"
-+
- struct radeon_fb_device {
-- struct radeon_device *rdev;
-- struct drm_display_mode *mode;
-+ struct drm_fb_helper helper;
- struct radeon_framebuffer *rfb;
-- int crtc_count;
-- /* crtc currently bound to this */
-- uint32_t crtc_ids[2];
-+ struct radeon_device *rdev;
- };
-
--static int radeonfb_setcolreg(unsigned regno,
-- unsigned red,
-- unsigned green,
-- unsigned blue,
-- unsigned transp,
-- struct fb_info *info)
--{
-- struct radeon_fb_device *rfbdev = info->par;
-- struct drm_device *dev = rfbdev->rdev->ddev;
-- struct drm_crtc *crtc;
-- int i;
--
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-- struct drm_mode_set *modeset = &radeon_crtc->mode_set;
-- struct drm_framebuffer *fb = modeset->fb;
--
-- for (i = 0; i < rfbdev->crtc_count; i++) {
-- if (crtc->base.id == rfbdev->crtc_ids[i]) {
-- break;
-- }
-- }
-- if (i == rfbdev->crtc_count) {
-- continue;
-- }
-- if (regno > 255) {
-- return 1;
-- }
-- if (fb->depth == 8) {
-- radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno);
-- return 0;
-- }
--
-- if (regno < 16) {
-- switch (fb->depth) {
-- case 15:
-- fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
-- ((green & 0xf800) >> 6) |
-- ((blue & 0xf800) >> 11);
-- break;
-- case 16:
-- fb->pseudo_palette[regno] = (red & 0xf800) |
-- ((green & 0xfc00) >> 5) |
-- ((blue & 0xf800) >> 11);
-- break;
-- case 24:
-- case 32:
-- fb->pseudo_palette[regno] =
-- (((red >> 8) & 0xff) << info->var.red.offset) |
-- (((green >> 8) & 0xff) << info->var.green.offset) |
-- (((blue >> 8) & 0xff) << info->var.blue.offset);
-- break;
-- }
-- }
-- }
-- return 0;
--}
--
--static int radeonfb_check_var(struct fb_var_screeninfo *var,
-- struct fb_info *info)
-+static int radeon_fb_check_var(struct fb_var_screeninfo *var,
-+ struct fb_info *info)
- {
-- struct radeon_fb_device *rfbdev = info->par;
-- struct radeon_framebuffer *rfb = rfbdev->rfb;
-- struct drm_framebuffer *fb = &rfb->base;
-- int depth;
--
-- if (var->pixclock == -1 || !var->pixclock) {
-- return -EINVAL;
-- }
-- /* Need to resize the fb object !!! */
-- if (var->xres > fb->width || var->yres > fb->height) {
-- DRM_ERROR("Requested width/height is greater than current fb "
-- "object %dx%d > %dx%d\n", var->xres, var->yres,
-- fb->width, fb->height);
-- DRM_ERROR("Need resizing code.\n");
-- return -EINVAL;
-- }
--
-- switch (var->bits_per_pixel) {
-- case 16:
-- depth = (var->green.length == 6) ? 16 : 15;
-- break;
-- case 32:
-- depth = (var->transp.length > 0) ? 32 : 24;
-- break;
-- default:
-- depth = var->bits_per_pixel;
-- break;
-- }
--
-- switch (depth) {
-- case 8:
-- var->red.offset = 0;
-- var->green.offset = 0;
-- var->blue.offset = 0;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 0;
-- var->transp.offset = 0;
-- break;
--#ifdef __LITTLE_ENDIAN
-- case 15:
-- var->red.offset = 10;
-- var->green.offset = 5;
-- var->blue.offset = 0;
-- var->red.length = 5;
-- var->green.length = 5;
-- var->blue.length = 5;
-- var->transp.length = 1;
-- var->transp.offset = 15;
-- break;
-- case 16:
-- var->red.offset = 11;
-- var->green.offset = 5;
-- var->blue.offset = 0;
-- var->red.length = 5;
-- var->green.length = 6;
-- var->blue.length = 5;
-- var->transp.length = 0;
-- var->transp.offset = 0;
-- break;
-- case 24:
-- var->red.offset = 16;
-- var->green.offset = 8;
-- var->blue.offset = 0;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 0;
-- var->transp.offset = 0;
-- break;
-- case 32:
-- var->red.offset = 16;
-- var->green.offset = 8;
-- var->blue.offset = 0;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 8;
-- var->transp.offset = 24;
-- break;
--#else
-- case 24:
-- var->red.offset = 8;
-- var->green.offset = 16;
-- var->blue.offset = 24;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 0;
-- var->transp.offset = 0;
-- break;
-- case 32:
-- var->red.offset = 8;
-- var->green.offset = 16;
-- var->blue.offset = 24;
-- var->red.length = 8;
-- var->green.length = 8;
-- var->blue.length = 8;
-- var->transp.length = 8;
-- var->transp.offset = 0;
-- break;
--#endif
-- default:
-- return -EINVAL;
-- }
-- return 0;
--}
--
--/* this will let fbcon do the mode init */
--static int radeonfb_set_par(struct fb_info *info)
--{
-- struct radeon_fb_device *rfbdev = info->par;
-- struct drm_device *dev = rfbdev->rdev->ddev;
-- struct fb_var_screeninfo *var = &info->var;
-- struct drm_crtc *crtc;
- int ret;
-- int i;
--
-- if (var->pixclock != -1) {
--		DRM_ERROR("PIXEL CLOCK SET\n");
-- return -EINVAL;
-- }
--
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
--
-- for (i = 0; i < rfbdev->crtc_count; i++) {
-- if (crtc->base.id == rfbdev->crtc_ids[i]) {
-- break;
-- }
-- }
-- if (i == rfbdev->crtc_count) {
-- continue;
-- }
-- if (crtc->fb == radeon_crtc->mode_set.fb) {
-- mutex_lock(&dev->mode_config.mutex);
-- ret = crtc->funcs->set_config(&radeon_crtc->mode_set);
-- mutex_unlock(&dev->mode_config.mutex);
-- if (ret) {
-- return ret;
-- }
-- }
-- }
-- return 0;
--}
--
--static int radeonfb_pan_display(struct fb_var_screeninfo *var,
-- struct fb_info *info)
--{
-- struct radeon_fb_device *rfbdev = info->par;
-- struct drm_device *dev = rfbdev->rdev->ddev;
-- struct drm_mode_set *modeset;
-- struct drm_crtc *crtc;
-- struct radeon_crtc *radeon_crtc;
-- int ret = 0;
-- int i;
--
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- for (i = 0; i < rfbdev->crtc_count; i++) {
-- if (crtc->base.id == rfbdev->crtc_ids[i]) {
-- break;
-- }
-- }
--
-- if (i == rfbdev->crtc_count) {
-- continue;
-- }
--
-- radeon_crtc = to_radeon_crtc(crtc);
-- modeset = &radeon_crtc->mode_set;
--
-- modeset->x = var->xoffset;
-- modeset->y = var->yoffset;
--
-- if (modeset->num_connectors) {
-- mutex_lock(&dev->mode_config.mutex);
-- ret = crtc->funcs->set_config(modeset);
-- mutex_unlock(&dev->mode_config.mutex);
-- if (!ret) {
-- info->var.xoffset = var->xoffset;
-- info->var.yoffset = var->yoffset;
-- }
-+ ret = drm_fb_helper_check_var(var, info);
-+ if (ret)
-+ return ret;
-+
-+ /* big endian override for radeon endian workaround */
-+#ifdef __BIG_ENDIAN
-+ {
-+ int depth;
-+ switch (var->bits_per_pixel) {
-+ case 16:
-+ depth = (var->green.length == 6) ? 16 : 15;
-+ break;
-+ case 32:
-+ depth = (var->transp.length > 0) ? 32 : 24;
-+ break;
-+ default:
-+ depth = var->bits_per_pixel;
-+ break;
- }
-- }
-- return ret;
--}
--
--static void radeonfb_on(struct fb_info *info)
--{
-- struct radeon_fb_device *rfbdev = info->par;
-- struct drm_device *dev = rfbdev->rdev->ddev;
-- struct drm_crtc *crtc;
-- struct drm_encoder *encoder;
-- int i;
--
-- /*
-- * For each CRTC in this fb, find all associated encoders
-- * and turn them off, then turn off the CRTC.
-- */
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
--
-- for (i = 0; i < rfbdev->crtc_count; i++) {
-- if (crtc->base.id == rfbdev->crtc_ids[i]) {
-- break;
-- }
-- }
--
-- mutex_lock(&dev->mode_config.mutex);
-- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-- mutex_unlock(&dev->mode_config.mutex);
--
-- /* Found a CRTC on this fb, now find encoders */
-- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-- if (encoder->crtc == crtc) {
-- struct drm_encoder_helper_funcs *encoder_funcs;
--
-- encoder_funcs = encoder->helper_private;
-- mutex_lock(&dev->mode_config.mutex);
-- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-- mutex_unlock(&dev->mode_config.mutex);
-- }
-- }
-- }
--}
--
--static void radeonfb_off(struct fb_info *info, int dpms_mode)
--{
-- struct radeon_fb_device *rfbdev = info->par;
-- struct drm_device *dev = rfbdev->rdev->ddev;
-- struct drm_crtc *crtc;
-- struct drm_encoder *encoder;
-- int i;
--
-- /*
-- * For each CRTC in this fb, find all associated encoders
-- * and turn them off, then turn off the CRTC.
-- */
-- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
--
-- for (i = 0; i < rfbdev->crtc_count; i++) {
-- if (crtc->base.id == rfbdev->crtc_ids[i]) {
-- break;
-- }
-- }
--
-- /* Found a CRTC on this fb, now find encoders */
-- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-- if (encoder->crtc == crtc) {
-- struct drm_encoder_helper_funcs *encoder_funcs;
--
-- encoder_funcs = encoder->helper_private;
-- mutex_lock(&dev->mode_config.mutex);
-- encoder_funcs->dpms(encoder, dpms_mode);
-- mutex_unlock(&dev->mode_config.mutex);
-- }
-- }
-- if (dpms_mode == DRM_MODE_DPMS_OFF) {
-- mutex_lock(&dev->mode_config.mutex);
-- crtc_funcs->dpms(crtc, dpms_mode);
-- mutex_unlock(&dev->mode_config.mutex);
-+ switch (depth) {
-+ case 8:
-+ var->red.offset = 0;
-+ var->green.offset = 0;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 24:
-+ var->red.offset = 8;
-+ var->green.offset = 16;
-+ var->blue.offset = 24;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 32:
-+ var->red.offset = 8;
-+ var->green.offset = 16;
-+ var->blue.offset = 24;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 8;
-+ var->transp.offset = 0;
-+ break;
-+ default:
-+ return -EINVAL;
- }
- }
--}
--
--int radeonfb_blank(int blank, struct fb_info *info)
--{
-- switch (blank) {
-- case FB_BLANK_UNBLANK:
-- radeonfb_on(info);
-- break;
-- case FB_BLANK_NORMAL:
-- radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
-- break;
-- case FB_BLANK_HSYNC_SUSPEND:
-- radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
-- break;
-- case FB_BLANK_VSYNC_SUSPEND:
-- radeonfb_off(info, DRM_MODE_DPMS_SUSPEND);
-- break;
-- case FB_BLANK_POWERDOWN:
-- radeonfb_off(info, DRM_MODE_DPMS_OFF);
-- break;
-- }
-+#endif
- return 0;
- }
-
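/*
 * The big-endian override above first has to recover the logical depth from
 * the fbdev var, because 15/16 bpp and 24/32 bpp modes share a
 * bits_per_pixel value.  That inference as a stand-alone helper
 * (hypothetical name, same rules as the switch above):
 */
static int example_var_to_depth(const struct fb_var_screeninfo *var)
{
	switch (var->bits_per_pixel) {
	case 16:
		return (var->green.length == 6) ? 16 : 15;	/* RGB565 vs XRGB1555 */
	case 32:
		return (var->transp.length > 0) ? 32 : 24;	/* ARGB8888 vs XRGB8888 */
	default:
		return var->bits_per_pixel;
	}
}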
- static struct fb_ops radeonfb_ops = {
- .owner = THIS_MODULE,
-- .fb_check_var = radeonfb_check_var,
-- .fb_set_par = radeonfb_set_par,
-- .fb_setcolreg = radeonfb_setcolreg,
-+ .fb_check_var = radeon_fb_check_var,
-+ .fb_set_par = drm_fb_helper_set_par,
-+ .fb_setcolreg = drm_fb_helper_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-- .fb_pan_display = radeonfb_pan_display,
-- .fb_blank = radeonfb_blank,
-+ .fb_pan_display = drm_fb_helper_pan_display,
-+ .fb_blank = drm_fb_helper_blank,
- };
-
- /**
-@@ -456,21 +159,6 @@ int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
- }
- EXPORT_SYMBOL(radeonfb_resize);
-
--static struct drm_mode_set panic_mode;
--
--int radeonfb_panic(struct notifier_block *n, unsigned long unused,
-- void *panic_str)
--{
-- DRM_ERROR("panic occurred, switching back to text console\n");
-- drm_crtc_helper_set_config(&panic_mode);
-- return 0;
--}
--EXPORT_SYMBOL(radeonfb_panic);
--
--static struct notifier_block paniced = {
-- .notifier_call = radeonfb_panic,
--};
--
- static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
- {
- int aligned = width;
-@@ -495,11 +183,16 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
- return aligned;
- }
-
--int radeonfb_create(struct radeon_device *rdev,
-+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
-+ .gamma_set = radeon_crtc_fb_gamma_set,
-+};
-+
-+int radeonfb_create(struct drm_device *dev,
- uint32_t fb_width, uint32_t fb_height,
- uint32_t surface_width, uint32_t surface_height,
-- struct radeon_framebuffer **rfb_p)
-+ struct drm_framebuffer **fb_p)
- {
-+ struct radeon_device *rdev = dev->dev_private;
- struct fb_info *info;
- struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb = NULL;
-@@ -554,8 +247,8 @@ int radeonfb_create(struct radeon_device *rdev,
-
- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
-
-+ *fb_p = fb;
- rfb = to_radeon_framebuffer(fb);
-- *rfb_p = rfb;
- rdev->fbdev_rfb = rfb;
- rdev->fbdev_robj = robj;
-
-@@ -564,7 +257,14 @@ int radeonfb_create(struct radeon_device *rdev,
- ret = -ENOMEM;
- goto out_unref;
- }
-+
- rfbdev = info->par;
-+ rfbdev->helper.funcs = &radeon_fb_helper_funcs;
-+ rfbdev->helper.dev = dev;
-+ ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2,
-+ RADEONFB_CONN_LIMIT);
-+ if (ret)
-+ goto out_unref;
-
- if (fb_tiled)
- radeon_object_check_tiling(robj, 0, 0);
-@@ -577,33 +277,19 @@ int radeonfb_create(struct radeon_device *rdev,
- memset_io(fbptr, 0, aligned_size);
-
- strcpy(info->fix.id, "radeondrmfb");
-- info->fix.type = FB_TYPE_PACKED_PIXELS;
-- info->fix.visual = FB_VISUAL_TRUECOLOR;
-- info->fix.type_aux = 0;
-- info->fix.xpanstep = 1; /* doing it in hw */
-- info->fix.ypanstep = 1; /* doing it in hw */
-- info->fix.ywrapstep = 0;
-- info->fix.accel = FB_ACCEL_NONE;
-- info->fix.type_aux = 0;
-+
-+ drm_fb_helper_fill_fix(info, fb->pitch);
-+
- info->flags = FBINFO_DEFAULT;
- info->fbops = &radeonfb_ops;
-- info->fix.line_length = fb->pitch;
-+
- tmp = fb_gpuaddr - rdev->mc.vram_location;
- info->fix.smem_start = rdev->mc.aper_base + tmp;
- info->fix.smem_len = size;
- info->screen_base = fbptr;
- info->screen_size = size;
-- info->pseudo_palette = fb->pseudo_palette;
-- info->var.xres_virtual = fb->width;
-- info->var.yres_virtual = fb->height;
-- info->var.bits_per_pixel = fb->bits_per_pixel;
-- info->var.xoffset = 0;
-- info->var.yoffset = 0;
-- info->var.activate = FB_ACTIVATE_NOW;
-- info->var.height = -1;
-- info->var.width = -1;
-- info->var.xres = fb_width;
-- info->var.yres = fb_height;
-+
-+ drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
-
- /* setup aperture base/size for vesafb takeover */
- info->aperture_base = rdev->ddev->mode_config.fb_base;
-@@ -626,6 +312,9 @@ int radeonfb_create(struct radeon_device *rdev,
- DRM_INFO("fb depth is %d\n", fb->depth);
- DRM_INFO(" pitch is %d\n", fb->pitch);
-
-+#ifdef __BIG_ENDIAN
-+	/* drm_fb_helper_fill_var() sets defaults for these fields -
-+	   override them on big endian */
- switch (fb->depth) {
- case 8:
- info->var.red.offset = 0;
-@@ -637,47 +326,6 @@ int radeonfb_create(struct radeon_device *rdev,
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
--#ifdef __LITTLE_ENDIAN
-- case 15:
-- info->var.red.offset = 10;
-- info->var.green.offset = 5;
-- info->var.blue.offset = 0;
-- info->var.red.length = 5;
-- info->var.green.length = 5;
-- info->var.blue.length = 5;
-- info->var.transp.offset = 15;
-- info->var.transp.length = 1;
-- break;
-- case 16:
-- info->var.red.offset = 11;
-- info->var.green.offset = 5;
-- info->var.blue.offset = 0;
-- info->var.red.length = 5;
-- info->var.green.length = 6;
-- info->var.blue.length = 5;
-- info->var.transp.offset = 0;
-- break;
-- case 24:
-- info->var.red.offset = 16;
-- info->var.green.offset = 8;
-- info->var.blue.offset = 0;
-- info->var.red.length = 8;
-- info->var.green.length = 8;
-- info->var.blue.length = 8;
-- info->var.transp.offset = 0;
-- info->var.transp.length = 0;
-- break;
-- case 32:
-- info->var.red.offset = 16;
-- info->var.green.offset = 8;
-- info->var.blue.offset = 0;
-- info->var.red.length = 8;
-- info->var.green.length = 8;
-- info->var.blue.length = 8;
-- info->var.transp.offset = 24;
-- info->var.transp.length = 8;
-- break;
--#else
- case 24:
- info->var.red.offset = 8;
- info->var.green.offset = 16;
-@@ -699,9 +347,9 @@ int radeonfb_create(struct radeon_device *rdev,
- info->var.transp.length = 8;
- break;
- default:
--#endif
- break;
- }
-+#endif
-
- fb->fbdev = info;
- rfbdev->rfb = rfb;
-@@ -726,145 +374,10 @@ out:
- return ret;
- }
-
--static int radeonfb_single_fb_probe(struct radeon_device *rdev)
--{
-- struct drm_crtc *crtc;
-- struct drm_connector *connector;
-- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
-- unsigned int surface_width = 0, surface_height = 0;
-- int new_fb = 0;
-- int crtc_count = 0;
-- int ret, i, conn_count = 0;
-- struct radeon_framebuffer *rfb;
-- struct fb_info *info;
-- struct radeon_fb_device *rfbdev;
-- struct drm_mode_set *modeset = NULL;
--
-- /* first up get a count of crtcs now in use and new min/maxes width/heights */
-- list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
-- if (drm_helper_crtc_in_use(crtc)) {
-- if (crtc->desired_mode) {
-- if (crtc->desired_mode->hdisplay < fb_width)
-- fb_width = crtc->desired_mode->hdisplay;
--
-- if (crtc->desired_mode->vdisplay < fb_height)
-- fb_height = crtc->desired_mode->vdisplay;
--
-- if (crtc->desired_mode->hdisplay > surface_width)
-- surface_width = crtc->desired_mode->hdisplay;
--
-- if (crtc->desired_mode->vdisplay > surface_height)
-- surface_height = crtc->desired_mode->vdisplay;
-- }
-- crtc_count++;
-- }
-- }
--
-- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
-- /* hmm everyone went away - assume VGA cable just fell out
-- and will come back later. */
-- return 0;
-- }
--
-- /* do we have an fb already? */
-- if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) {
-- /* create an fb if we don't have one */
-- ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb);
-- if (ret) {
-- return -EINVAL;
-- }
-- new_fb = 1;
-- } else {
-- struct drm_framebuffer *fb;
-- fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head);
-- rfb = to_radeon_framebuffer(fb);
--
-- /* if someone hotplugs something bigger than we have already allocated, we are pwned.
-- As really we can't resize an fbdev that is in the wild currently due to fbdev
-- not really being designed for the lower layers moving stuff around under it.
-- - so in the grand style of things - punt. */
-- if ((fb->width < surface_width) || (fb->height < surface_height)) {
-- DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
-- return -EINVAL;
-- }
-- }
--
-- info = rfb->base.fbdev;
-- rdev->fbdev_info = info;
-- rfbdev = info->par;
--
-- crtc_count = 0;
-- /* okay we need to setup new connector sets in the crtcs */
-- list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
-- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-- modeset = &radeon_crtc->mode_set;
-- modeset->fb = &rfb->base;
-- conn_count = 0;
-- list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) {
-- if (connector->encoder)
-- if (connector->encoder->crtc == modeset->crtc) {
-- modeset->connectors[conn_count] = connector;
-- conn_count++;
-- if (conn_count > RADEONFB_CONN_LIMIT)
-- BUG();
-- }
-- }
--
-- for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++)
-- modeset->connectors[i] = NULL;
--
--
-- rfbdev->crtc_ids[crtc_count++] = crtc->base.id;
--
-- modeset->num_connectors = conn_count;
-- if (modeset->crtc->desired_mode) {
-- if (modeset->mode) {
-- drm_mode_destroy(rdev->ddev, modeset->mode);
-- }
-- modeset->mode = drm_mode_duplicate(rdev->ddev,
-- modeset->crtc->desired_mode);
-- }
-- }
-- rfbdev->crtc_count = crtc_count;
--
-- if (new_fb) {
-- info->var.pixclock = -1;
-- if (register_framebuffer(info) < 0)
-- return -EINVAL;
-- } else {
-- radeonfb_set_par(info);
-- }
-- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-- info->fix.id);
--
-- /* Switch back to kernel console on panic */
-- panic_mode = *modeset;
-- atomic_notifier_chain_register(&panic_notifier_list, &paniced);
-- printk(KERN_INFO "registered panic notifier\n");
--
-- return 0;
--}
--
- int radeonfb_probe(struct drm_device *dev)
- {
- int ret;
--
-- /* something has changed in the lower levels of hell - deal with it
-- here */
--
-- /* two modes : a) 1 fb to rule all crtcs.
-- b) one fb per crtc.
-- two actions 1) new connected device
-- 2) device removed.
-- case a/1 : if the fb surface isn't big enough - resize the surface fb.
-- if the fb size isn't big enough - resize fb into surface.
-- if everything big enough configure the new crtc/etc.
-- case a/2 : undo the configuration
-- possibly resize down the fb to fit the new configuration.
-- case b/1 : see if it is on a new crtc - setup a new fb and add it.
-- case b/2 : teardown the new fb.
-- */
-- ret = radeonfb_single_fb_probe(dev->dev_private);
-+ ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
- return ret;
- }
- EXPORT_SYMBOL(radeonfb_probe);
-@@ -880,16 +393,17 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
- }
- info = fb->fbdev;
- if (info) {
-+ struct radeon_fb_device *rfbdev = info->par;
- robj = rfb->obj->driver_private;
- unregister_framebuffer(info);
- radeon_object_kunmap(robj);
- radeon_object_unpin(robj);
-+ drm_fb_helper_free(&rfbdev->helper);
- framebuffer_release(info);
- }
-
- printk(KERN_INFO "unregistered panic notifier\n");
-- atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
-- memset(&panic_mode, 0, sizeof(struct drm_mode_set));
-+
- return 0;
- }
- EXPORT_SYMBOL(radeonfb_remove);
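/*
 * After this conversion the driver-side fbdev glue reduces to a create
 * callback plus a gamma hook; CRTC scanning, surface sizing and the
 * setcolreg/blank/pan plumbing now live in drm_fb_helper.  A condensed view
 * of the wiring that remains (sketch; only entry points present in this
 * diff are used):
 */
static struct drm_fb_helper_funcs example_fb_helper_funcs = {
	.gamma_set = radeon_crtc_fb_gamma_set,	/* only per-driver fbdev hook left */
};

int example_fb_probe(struct drm_device *dev)
{
	/* the helper calls radeonfb_create() whenever a framebuffer is needed */
	return drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
}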
-diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
-index b4e48dd..3beb26d 100644
---- a/drivers/gpu/drm/radeon/radeon_fence.c
-+++ b/drivers/gpu/drm/radeon/radeon_fence.c
-@@ -53,9 +53,9 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
- * away
- */
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
-- } else {
-+ } else
- radeon_fence_ring_emit(rdev, fence);
-- }
-+
- fence->emited = true;
- fence->timeout = jiffies + ((2000 * HZ) / 1000);
- list_del(&fence->list);
-@@ -168,7 +168,38 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
- return signaled;
- }
-
--int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
-+int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
-+{
-+ struct radeon_device *rdev;
-+ int ret = 0;
-+
-+ rdev = fence->rdev;
-+
-+ __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-+
-+ while (1) {
-+ if (radeon_fence_signaled(fence))
-+ break;
-+
-+ if (time_after_eq(jiffies, fence->timeout)) {
-+ ret = -EBUSY;
-+ break;
-+ }
-+
-+ if (lazy)
-+ schedule_timeout(1);
-+
-+ if (intr && signal_pending(current)) {
-+ ret = -ERESTARTSYS;
-+ break;
-+ }
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return ret;
-+}
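/*
 * r600_fence_wait() above is a plain polling loop: spin (optionally yielding
 * one tick) until the fence signals, the roughly two second timeout set in
 * radeon_fence_emit() expires, or a signal arrives.  The same skeleton with
 * the radeon types abstracted away (illustrative only; assumes the usual
 * <linux/sched.h>/<linux/jiffies.h> environment and a caller-supplied
 * signaled() predicate):
 */
static int example_poll_wait(bool (*signaled)(void *), void *cookie,
			     unsigned long timeout, bool intr, bool lazy)
{
	int ret = 0;

	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
	while (!signaled(cookie)) {
		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;		/* timed out */
			break;
		}
		if (lazy)
			schedule_timeout(1);	/* give the scheduler one tick */
		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}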
-+
-+
-+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
- {
- struct radeon_device *rdev;
- unsigned long cur_jiffies;
-@@ -176,7 +207,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
- bool expired = false;
- int r;
-
--
- if (fence == NULL) {
- WARN(1, "Querying an invalid fence : %p !\n", fence);
- return 0;
-@@ -185,13 +215,22 @@ int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
- if (radeon_fence_signaled(fence)) {
- return 0;
- }
-+
-+ if (rdev->family >= CHIP_R600) {
-+ r = r600_fence_wait(fence, intr, 0);
-+ if (r == -ERESTARTSYS)
-+ return -EBUSY;
-+ return r;
-+ }
-+
- retry:
- cur_jiffies = jiffies;
- timeout = HZ / 100;
- if (time_after(fence->timeout, cur_jiffies)) {
- timeout = fence->timeout - cur_jiffies;
- }
-- if (interruptible) {
-+
-+ if (intr) {
- r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
- radeon_fence_signaled(fence), timeout);
- if (unlikely(r == -ERESTARTSYS)) {
-diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
-index 9805e4b..1841145 100644
---- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
-+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
-@@ -28,7 +28,6 @@
- #include "drmP.h"
- #include "radeon_drm.h"
- #include "radeon_reg.h"
--#include "radeon_microcode.h"
- #include "radeon.h"
- #include "atom.h"
-
-diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
-index 0da72f1..0d29d15 100644
---- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
-+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
-@@ -28,6 +28,7 @@
- #include <drm/radeon_drm.h>
- #include "radeon_fixed.h"
- #include "radeon.h"
-+#include "atom.h"
-
- static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
-@@ -501,6 +502,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-+ struct drm_encoder *encoder;
- int format;
- int hsync_start;
- int hsync_wid;
-@@ -509,8 +511,19 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
- uint32_t crtc_h_sync_strt_wid;
- uint32_t crtc_v_total_disp;
- uint32_t crtc_v_sync_strt_wid;
-+ bool is_tv = false;
-
- DRM_DEBUG("\n");
-+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-+ if (encoder->crtc == crtc) {
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
-+ is_tv = true;
-+ DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id);
-+ break;
-+ }
-+ }
-+ }
-
- switch (crtc->fb->bits_per_pixel) {
- case 15: /* 555 */
-@@ -642,6 +655,11 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
- WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- }
-
-+ if (is_tv)
-+ radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp,
-+ &crtc_h_sync_strt_wid, &crtc_v_total_disp,
-+ &crtc_v_sync_strt_wid);
-+
- WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
- WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
- WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
-@@ -668,7 +686,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
- uint32_t pll_ref_div = 0;
- uint32_t pll_fb_post_div = 0;
- uint32_t htotal_cntl = 0;
--
-+ bool is_tv = false;
- struct radeon_pll *pll;
-
- struct {
-@@ -703,6 +721,13 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+
-+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
-+ is_tv = true;
-+ break;
-+ }
-+
- if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
- pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
-@@ -766,6 +791,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
- ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
- RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
-
-+ if (is_tv) {
-+ radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl,
-+ &pll_ref_div, &pll_fb_post_div,
-+ &pixclks_cntl);
-+ }
-+
- WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
- RADEON_PIX2CLK_SRC_SEL_CPUCLK,
- ~(RADEON_PIX2CLK_SRC_SEL_MASK));
-@@ -820,6 +851,15 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
-
- WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
- } else {
-+ uint32_t pixclks_cntl;
-+
-+
-+ if (is_tv) {
-+ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
-+ radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div,
-+ &pll_fb_post_div, &pixclks_cntl);
-+ }
-+
- if (rdev->flags & RADEON_IS_MOBILITY) {
-		/* A temporary workaround for the occasional blanking on certain laptop panels.
-		   This appears to be related to the PLL divider registers (fail to lock?).
-@@ -914,6 +954,8 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
- RADEON_VCLK_SRC_SEL_PPLLCLK,
- ~(RADEON_VCLK_SRC_SEL_MASK));
-
-+ if (is_tv)
-+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
- }
- }
-
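/*
 * Both hunks in this file repeat the same scan: walk the encoder list and,
 * if an encoder bound to this CRTC has ATOM_DEVICE_TV_SUPPORT set in
 * active_device, take the TV path (adjusted CRTC timings and PLL dividers).
 * The check written as a helper (hypothetical name, same logic):
 */
static bool example_crtc_drives_tv(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

		if (encoder->crtc == crtc &&
		    (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT))
			return true;	/* this CRTC feeds the TV encoder */
	}
	return false;
}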
-diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
-index 9322675..0aaafcd 100644
---- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
-+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
-@@ -29,6 +29,15 @@
- #include "radeon.h"
- #include "atom.h"
-
-+static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
-+{
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct drm_encoder_helper_funcs *encoder_funcs;
-+
-+ encoder_funcs = encoder->helper_private;
-+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
-+ radeon_encoder->active_device = 0;
-+}
-
- static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
- {
-@@ -98,6 +107,8 @@ static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
- else
- radeon_combios_output_lock(encoder, true);
- radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
-+
-+ radeon_encoder_set_active_device(encoder);
- }
-
- static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
-@@ -195,6 +206,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
- .prepare = radeon_legacy_lvds_prepare,
- .mode_set = radeon_legacy_lvds_mode_set,
- .commit = radeon_legacy_lvds_commit,
-+ .disable = radeon_legacy_encoder_disable,
- };
-
-
-@@ -260,6 +272,7 @@ static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
- else
- radeon_combios_output_lock(encoder, true);
- radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
-+ radeon_encoder_set_active_device(encoder);
- }
-
- static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
-@@ -402,6 +415,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_fu
- .mode_set = radeon_legacy_primary_dac_mode_set,
- .commit = radeon_legacy_primary_dac_commit,
- .detect = radeon_legacy_primary_dac_detect,
-+ .disable = radeon_legacy_encoder_disable,
- };
-
-
-@@ -454,6 +468,7 @@ static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
- else
- radeon_combios_output_lock(encoder, true);
- radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
-+ radeon_encoder_set_active_device(encoder);
- }
-
- static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
-@@ -566,6 +581,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs
- .prepare = radeon_legacy_tmds_int_prepare,
- .mode_set = radeon_legacy_tmds_int_mode_set,
- .commit = radeon_legacy_tmds_int_commit,
-+ .disable = radeon_legacy_encoder_disable,
- };
-
-
-@@ -620,6 +636,7 @@ static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
- else
- radeon_combios_output_lock(encoder, true);
- radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
-+ radeon_encoder_set_active_device(encoder);
- }
-
- static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
-@@ -706,6 +723,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
- .prepare = radeon_legacy_tmds_ext_prepare,
- .mode_set = radeon_legacy_tmds_ext_mode_set,
- .commit = radeon_legacy_tmds_ext_commit,
-+ .disable = radeon_legacy_encoder_disable,
- };
-
-
-@@ -727,17 +745,21 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
- {
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
-- /* uint32_t tv_master_cntl = 0; */
--
-+ uint32_t tv_master_cntl = 0;
-+ bool is_tv;
- DRM_DEBUG("\n");
-
-+ is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
-+
- if (rdev->family == CHIP_R200)
- fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
- else {
-- crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-- /* FIXME TV */
-- /* tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); */
-+ if (is_tv)
-+ tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
-+ else
-+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
- tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
- }
-
-@@ -746,20 +768,23 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
- if (rdev->family == CHIP_R200) {
- fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
- } else {
-- crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
-- /* tv_master_cntl |= RADEON_TV_ON; */
-+ if (is_tv)
-+ tv_master_cntl |= RADEON_TV_ON;
-+ else
-+ crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
-+
- if (rdev->family == CHIP_R420 ||
-- rdev->family == CHIP_R423 ||
-- rdev->family == CHIP_RV410)
-+ rdev->family == CHIP_R423 ||
-+ rdev->family == CHIP_RV410)
- tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
-- R420_TV_DAC_GDACPD |
-- R420_TV_DAC_BDACPD |
-- RADEON_TV_DAC_BGSLEEP);
-+ R420_TV_DAC_GDACPD |
-+ R420_TV_DAC_BDACPD |
-+ RADEON_TV_DAC_BGSLEEP);
- else
- tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
-- RADEON_TV_DAC_GDACPD |
-- RADEON_TV_DAC_BDACPD |
-- RADEON_TV_DAC_BGSLEEP);
-+ RADEON_TV_DAC_GDACPD |
-+ RADEON_TV_DAC_BDACPD |
-+ RADEON_TV_DAC_BGSLEEP);
- }
- break;
- case DRM_MODE_DPMS_STANDBY:
-@@ -768,8 +793,11 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
- if (rdev->family == CHIP_R200)
- fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
- else {
-- crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
-- /* tv_master_cntl &= ~RADEON_TV_ON; */
-+ if (is_tv)
-+ tv_master_cntl &= ~RADEON_TV_ON;
-+ else
-+ crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
-+
- if (rdev->family == CHIP_R420 ||
- rdev->family == CHIP_R423 ||
- rdev->family == CHIP_RV410)
-@@ -789,8 +817,10 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
- if (rdev->family == CHIP_R200) {
- WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
- } else {
-- WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
-- /* WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); */
-+ if (is_tv)
-+ WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
-+ else
-+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
- WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
- }
-
-@@ -809,6 +839,7 @@ static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
- else
- radeon_combios_output_lock(encoder, true);
- radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
-+ radeon_encoder_set_active_device(encoder);
- }
-
- static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
-@@ -831,11 +862,15 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
- uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
-- uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0;
-+ uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
-+ bool is_tv = false;
-
- DRM_DEBUG("\n");
-
-+ is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
-+
- if (rdev->family != CHIP_R200) {
- tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
- if (rdev->family == CHIP_R420 ||
-@@ -858,7 +893,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
- }
-
- /* FIXME TV */
-- if (radeon_encoder->enc_priv) {
-+ if (tv_dac) {
- struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
- tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
- RADEON_TV_DAC_NHOLD |
-@@ -875,44 +910,93 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
- if (ASIC_IS_R300(rdev)) {
- gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
- disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
-- } else if (rdev->family == CHIP_R200)
-- fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
-+ }
-+
-+ if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev))
-+ disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
- else
- disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
-
-- dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
-+ if (rdev->family == CHIP_R200)
-+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
-
-- if (radeon_crtc->crtc_id == 0) {
-- if (ASIC_IS_R300(rdev)) {
-- disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
-- disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
-- } else if (rdev->family == CHIP_R200) {
-- fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
-- RADEON_FP2_DVO_RATE_SEL_SDR);
-- } else
-- disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
-+ if (is_tv) {
-+ uint32_t dac_cntl;
-+
-+ dac_cntl = RREG32(RADEON_DAC_CNTL);
-+ dac_cntl &= ~RADEON_DAC_TVO_EN;
-+ WREG32(RADEON_DAC_CNTL, dac_cntl);
-+
-+ if (ASIC_IS_R300(rdev))
-+ gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1;
-+
-+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL;
-+ if (radeon_crtc->crtc_id == 0) {
-+ if (ASIC_IS_R300(rdev)) {
-+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
-+ disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC |
-+ RADEON_DISP_TV_SOURCE_CRTC);
-+ }
-+ if (rdev->family >= CHIP_R200) {
-+ disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2;
-+ } else {
-+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
-+ }
-+ } else {
-+ if (ASIC_IS_R300(rdev)) {
-+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
-+ disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC;
-+ }
-+ if (rdev->family >= CHIP_R200) {
-+ disp_tv_out_cntl |= RADEON_DISP_TV_PATH_SRC_CRTC2;
-+ } else {
-+ disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
-+ }
-+ }
-+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
- } else {
-- if (ASIC_IS_R300(rdev)) {
-- disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
-- disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
-- } else if (rdev->family == CHIP_R200) {
-- fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
-- RADEON_FP2_DVO_RATE_SEL_SDR);
-- fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
-- } else
-- disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
-- }
-
-- WREG32(RADEON_DAC_CNTL2, dac2_cntl);
-+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
-+
-+ if (radeon_crtc->crtc_id == 0) {
-+ if (ASIC_IS_R300(rdev)) {
-+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
-+ disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
-+ } else if (rdev->family == CHIP_R200) {
-+ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
-+ RADEON_FP2_DVO_RATE_SEL_SDR);
-+ } else
-+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
-+ } else {
-+ if (ASIC_IS_R300(rdev)) {
-+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
-+ disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
-+ } else if (rdev->family == CHIP_R200) {
-+ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
-+ RADEON_FP2_DVO_RATE_SEL_SDR);
-+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
-+ } else
-+ disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
-+ }
-+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
-+ }
-
- if (ASIC_IS_R300(rdev)) {
- WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
-- WREG32(RADEON_DISP_TV_OUT_CNTL, disp_output_cntl);
-- } else if (rdev->family == CHIP_R200)
-- WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
-+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
-+ }
-+
-+ if (rdev->family >= CHIP_R200)
-+ WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
- else
- WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
-
-+ if (rdev->family == CHIP_R200)
-+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
-+
-+ if (is_tv)
-+ radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
-+
- if (rdev->is_atom_bios)
- radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
- else
-@@ -920,6 +1004,141 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
-
- }
-
-+static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
-+ struct drm_connector *connector)
-+{
-+ struct drm_device *dev = encoder->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
-+ uint32_t disp_output_cntl, gpiopad_a, tmp;
-+ bool found = false;
-+
-+ /* save regs needed */
-+ gpiopad_a = RREG32(RADEON_GPIOPAD_A);
-+ dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
-+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-+ dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
-+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
-+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
-+
-+ WREG32_P(RADEON_GPIOPAD_A, 0, ~1);
-+
-+ WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL);
-+
-+ WREG32(RADEON_CRTC2_GEN_CNTL,
-+ RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT);
-+
-+ tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
-+ tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
-+ WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
-+
-+ WREG32(RADEON_DAC_EXT_CNTL,
-+ RADEON_DAC2_FORCE_BLANK_OFF_EN |
-+ RADEON_DAC2_FORCE_DATA_EN |
-+ RADEON_DAC_FORCE_DATA_SEL_RGB |
-+ (0xec << RADEON_DAC_FORCE_DATA_SHIFT));
-+
-+ WREG32(RADEON_TV_DAC_CNTL,
-+ RADEON_TV_DAC_STD_NTSC |
-+ (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
-+ (6 << RADEON_TV_DAC_DACADJ_SHIFT));
-+
-+ RREG32(RADEON_TV_DAC_CNTL);
-+ mdelay(4);
-+
-+ WREG32(RADEON_TV_DAC_CNTL,
-+ RADEON_TV_DAC_NBLANK |
-+ RADEON_TV_DAC_NHOLD |
-+ RADEON_TV_MONITOR_DETECT_EN |
-+ RADEON_TV_DAC_STD_NTSC |
-+ (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
-+ (6 << RADEON_TV_DAC_DACADJ_SHIFT));
-+
-+ RREG32(RADEON_TV_DAC_CNTL);
-+ mdelay(6);
-+
-+ tmp = RREG32(RADEON_TV_DAC_CNTL);
-+ if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
-+ found = true;
-+ DRM_DEBUG("S-video TV connection detected\n");
-+ } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
-+ found = true;
-+ DRM_DEBUG("Composite TV connection detected\n");
-+ }
-+
-+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
-+ WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
-+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
-+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
-+ WREG32(RADEON_DAC_CNTL2, dac_cntl2);
-+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
-+ return found;
-+}
-+
-+static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
-+ struct drm_connector *connector)
-+{
-+ struct drm_device *dev = encoder->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ uint32_t tv_dac_cntl, dac_cntl2;
-+ uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp;
-+ bool found = false;
-+
-+ if (ASIC_IS_R300(rdev))
-+ return r300_legacy_tv_detect(encoder, connector);
-+
-+ dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
-+ tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
-+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
-+ config_cntl = RREG32(RADEON_CONFIG_CNTL);
-+ tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL);
-+
-+ tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL;
-+ WREG32(RADEON_DAC_CNTL2, tmp);
-+
-+ tmp = tv_master_cntl | RADEON_TV_ON;
-+ tmp &= ~(RADEON_TV_ASYNC_RST |
-+ RADEON_RESTART_PHASE_FIX |
-+ RADEON_CRT_FIFO_CE_EN |
-+ RADEON_TV_FIFO_CE_EN |
-+ RADEON_RE_SYNC_NOW_SEL_MASK);
-+ tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST;
-+ WREG32(RADEON_TV_MASTER_CNTL, tmp);
-+
-+ tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD |
-+ RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC |
-+ (8 << RADEON_TV_DAC_BGADJ_SHIFT);
-+
-+ if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK)
-+ tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT);
-+ else
-+ tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT);
-+ WREG32(RADEON_TV_DAC_CNTL, tmp);
-+
-+ tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN |
-+ RADEON_RED_MX_FORCE_DAC_DATA |
-+ RADEON_GRN_MX_FORCE_DAC_DATA |
-+ RADEON_BLU_MX_FORCE_DAC_DATA |
-+ (0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT);
-+ WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp);
-+
-+ mdelay(3);
-+ tmp = RREG32(RADEON_TV_DAC_CNTL);
-+ if (tmp & RADEON_TV_DAC_GDACDET) {
-+ found = true;
-+ DRM_DEBUG("S-video TV connection detected\n");
-+ } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
-+ found = true;
-+ DRM_DEBUG("Composite TV connection detected\n");
-+ }
-+
-+ WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
-+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
-+ WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
-+ WREG32(RADEON_DAC_CNTL2, dac_cntl2);
-+ return found;
-+}
-+
- static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
- {
-@@ -928,9 +1147,29 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
- uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
- uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp;
- enum drm_connector_status found = connector_status_disconnected;
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
- bool color = true;
-
-- /* FIXME tv */
-+ if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
-+ connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
-+ connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) {
-+ bool tv_detect;
-+
-+ if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT))
-+ return connector_status_disconnected;
-+
-+ tv_detect = radeon_legacy_tv_detect(encoder, connector);
-+ if (tv_detect && tv_dac)
-+ found = connector_status_connected;
-+ return found;
-+ }
-+
-+	/* don't probe if the encoder is being used for something other than a CRT */
-+ if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) {
-+ DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device);
-+ return connector_status_disconnected;
-+ }
-
- /* save the regs we need */
- pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
-@@ -1013,8 +1252,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
- }
- WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
-
-- /* return found; */
-- return connector_status_disconnected;
-+ return found;
-
- }
-
-@@ -1025,6 +1263,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs =
- .mode_set = radeon_legacy_tv_dac_mode_set,
- .commit = radeon_legacy_tv_dac_commit,
- .detect = radeon_legacy_tv_dac_detect,
-+ .disable = radeon_legacy_encoder_disable,
- };
-
-
-diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
-new file mode 100644
-index 0000000..3a12bb0
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
-@@ -0,0 +1,904 @@
-+#include "drmP.h"
-+#include "drm_crtc_helper.h"
-+#include "radeon.h"
-+
-+/*
-+ * Integrated TV out support based on the GATOS code by
-+ * Federico Ulivi <fulivi@lycos.com>
-+ */
-+
-+
-+/*
-+ * Limits of h/v positions (hPos & vPos)
-+ */
-+#define MAX_H_POSITION 5 /* Range: [-5..5], negative is on the left, 0 is default, positive is on the right */
-+#define MAX_V_POSITION 5 /* Range: [-5..5], negative is up, 0 is default, positive is down */
-+
-+/*
-+ * Unit for hPos (in TV clock periods)
-+ */
-+#define H_POS_UNIT 10
-+
-+/*
-+ * Indexes in h. code timing table for horizontal line position adjustment
-+ */
-+#define H_TABLE_POS1 6
-+#define H_TABLE_POS2 8
-+
-+/*
-+ * Limits of hor. size (hSize)
-+ */
-+#define MAX_H_SIZE 5 /* Range: [-5..5], negative is smaller, positive is larger */
-+
-+/* tv standard constants */
-+#define NTSC_TV_CLOCK_T 233
-+#define NTSC_TV_VFTOTAL 1
-+#define NTSC_TV_LINES_PER_FRAME 525
-+#define NTSC_TV_ZERO_H_SIZE 479166
-+#define NTSC_TV_H_SIZE_UNIT 9478
-+
-+#define PAL_TV_CLOCK_T 188
-+#define PAL_TV_VFTOTAL 3
-+#define PAL_TV_LINES_PER_FRAME 625
-+#define PAL_TV_ZERO_H_SIZE 473200
-+#define PAL_TV_H_SIZE_UNIT 9360
-+
-+/* tv pll setting for 27 mhz ref clk */
-+#define NTSC_TV_PLL_M_27 22
-+#define NTSC_TV_PLL_N_27 175
-+#define NTSC_TV_PLL_P_27 5
-+
-+#define PAL_TV_PLL_M_27 113
-+#define PAL_TV_PLL_N_27 668
-+#define PAL_TV_PLL_P_27 3
-+
-+/* tv pll setting for 14 mhz ref clk */
-+#define NTSC_TV_PLL_M_14 33
-+#define NTSC_TV_PLL_N_14 693
-+#define NTSC_TV_PLL_P_14 7
-+
-+#define VERT_LEAD_IN_LINES 2
-+#define FRAC_BITS 0xe
-+#define FRAC_MASK 0x3fff
-+
-+struct radeon_tv_mode_constants {
-+ uint16_t hor_resolution;
-+ uint16_t ver_resolution;
-+ enum radeon_tv_std standard;
-+ uint16_t hor_total;
-+ uint16_t ver_total;
-+ uint16_t hor_start;
-+ uint16_t hor_syncstart;
-+ uint16_t ver_syncstart;
-+ unsigned def_restart;
-+ uint16_t crtcPLL_N;
-+ uint8_t crtcPLL_M;
-+ uint8_t crtcPLL_post_div;
-+ unsigned pix_to_tv;
-+};
-+
-+static const uint16_t hor_timing_NTSC[] = {
-+ 0x0007,
-+ 0x003f,
-+ 0x0263,
-+ 0x0a24,
-+ 0x2a6b,
-+ 0x0a36,
-+ 0x126d, /* H_TABLE_POS1 */
-+ 0x1bfe,
-+ 0x1a8f, /* H_TABLE_POS2 */
-+ 0x1ec7,
-+ 0x3863,
-+ 0x1bfe,
-+ 0x1bfe,
-+ 0x1a2a,
-+ 0x1e95,
-+ 0x0e31,
-+ 0x201b,
-+ 0
-+};
-+
-+static const uint16_t vert_timing_NTSC[] = {
-+ 0x2001,
-+ 0x200d,
-+ 0x1006,
-+ 0x0c06,
-+ 0x1006,
-+ 0x1818,
-+ 0x21e3,
-+ 0x1006,
-+ 0x0c06,
-+ 0x1006,
-+ 0x1817,
-+ 0x21d4,
-+ 0x0002,
-+ 0
-+};
-+
-+static const uint16_t hor_timing_PAL[] = {
-+ 0x0007,
-+ 0x0058,
-+ 0x027c,
-+ 0x0a31,
-+ 0x2a77,
-+ 0x0a95,
-+ 0x124f, /* H_TABLE_POS1 */
-+ 0x1bfe,
-+ 0x1b22, /* H_TABLE_POS2 */
-+ 0x1ef9,
-+ 0x387c,
-+ 0x1bfe,
-+ 0x1bfe,
-+ 0x1b31,
-+ 0x1eb5,
-+ 0x0e43,
-+ 0x201b,
-+ 0
-+};
-+
-+static const uint16_t vert_timing_PAL[] = {
-+ 0x2001,
-+ 0x200c,
-+ 0x1005,
-+ 0x0c05,
-+ 0x1005,
-+ 0x1401,
-+ 0x1821,
-+ 0x2240,
-+ 0x1005,
-+ 0x0c05,
-+ 0x1005,
-+ 0x1401,
-+ 0x1822,
-+ 0x2230,
-+ 0x0002,
-+ 0
-+};
-+
-+/**********************************************************************
-+ *
-+ * availableModes
-+ *
-+ * Table of all allowed modes for tv output
-+ *
-+ **********************************************************************/
-+static const struct radeon_tv_mode_constants available_tv_modes[] = {
-+ { /* NTSC timing for 27 Mhz ref clk */
-+ 800, /* horResolution */
-+ 600, /* verResolution */
-+ TV_STD_NTSC, /* standard */
-+ 990, /* horTotal */
-+ 740, /* verTotal */
-+ 813, /* horStart */
-+ 824, /* horSyncStart */
-+ 632, /* verSyncStart */
-+ 625592, /* defRestart */
-+ 592, /* crtcPLL_N */
-+ 91, /* crtcPLL_M */
-+ 4, /* crtcPLL_postDiv */
-+ 1022, /* pixToTV */
-+ },
-+ { /* PAL timing for 27 Mhz ref clk */
-+ 800, /* horResolution */
-+ 600, /* verResolution */
-+ TV_STD_PAL, /* standard */
-+ 1144, /* horTotal */
-+ 706, /* verTotal */
-+ 812, /* horStart */
-+ 824, /* horSyncStart */
-+ 669, /* verSyncStart */
-+ 696700, /* defRestart */
-+ 1382, /* crtcPLL_N */
-+ 231, /* crtcPLL_M */
-+ 4, /* crtcPLL_postDiv */
-+ 759, /* pixToTV */
-+ },
-+ { /* NTSC timing for 14 Mhz ref clk */
-+ 800, /* horResolution */
-+ 600, /* verResolution */
-+ TV_STD_NTSC, /* standard */
-+ 1018, /* horTotal */
-+ 727, /* verTotal */
-+ 813, /* horStart */
-+ 840, /* horSyncStart */
-+ 633, /* verSyncStart */
-+ 630627, /* defRestart */
-+ 347, /* crtcPLL_N */
-+ 14, /* crtcPLL_M */
-+ 8, /* crtcPLL_postDiv */
-+ 1022, /* pixToTV */
-+ },
-+};
-+
-+#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
-+
-+static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(struct radeon_encoder *radeon_encoder,
-+ uint16_t *pll_ref_freq)
-+{
-+ struct drm_device *dev = radeon_encoder->base.dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ struct radeon_crtc *radeon_crtc;
-+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-+ const struct radeon_tv_mode_constants *const_ptr;
-+ struct radeon_pll *pll;
-+
-+ radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
-+ if (radeon_crtc->crtc_id == 1)
-+ pll = &rdev->clock.p2pll;
-+ else
-+ pll = &rdev->clock.p1pll;
-+
-+ if (pll_ref_freq)
-+ *pll_ref_freq = pll->reference_freq;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M) {
-+ if (pll->reference_freq == 2700)
-+ const_ptr = &available_tv_modes[0];
-+ else
-+ const_ptr = &available_tv_modes[2];
-+ } else {
-+ if (pll->reference_freq == 2700)
-+ const_ptr = &available_tv_modes[1];
-+ else
-+ const_ptr = &available_tv_modes[1]; /* FIX ME */
-+ }
-+ return const_ptr;
-+}
-+
-+static long YCOEF_value[5] = { 2, 2, 0, 4, 0 };
-+static long YCOEF_EN_value[5] = { 1, 1, 0, 1, 0 };
-+static long SLOPE_value[5] = { 1, 2, 2, 4, 8 };
-+static long SLOPE_limit[5] = { 6, 5, 4, 3, 2 };
-+
-+static void radeon_wait_pll_lock(struct drm_encoder *encoder, unsigned n_tests,
-+ unsigned n_wait_loops, unsigned cnt_threshold)
-+{
-+ struct drm_device *dev = encoder->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ uint32_t save_pll_test;
-+ unsigned int i, j;
-+
-+ WREG32(RADEON_TEST_DEBUG_MUX, (RREG32(RADEON_TEST_DEBUG_MUX) & 0xffff60ff) | 0x100);
-+ save_pll_test = RREG32_PLL(RADEON_PLL_TEST_CNTL);
-+ WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test & ~RADEON_PLL_MASK_READ_B);
-+
-+ WREG8(RADEON_CLOCK_CNTL_INDEX, RADEON_PLL_TEST_CNTL);
-+ for (i = 0; i < n_tests; i++) {
-+ WREG8(RADEON_CLOCK_CNTL_DATA + 3, 0);
-+ for (j = 0; j < n_wait_loops; j++)
-+ if (RREG8(RADEON_CLOCK_CNTL_DATA + 3) >= cnt_threshold)
-+ break;
-+ }
-+ WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test);
-+ WREG32(RADEON_TEST_DEBUG_MUX, RREG32(RADEON_TEST_DEBUG_MUX) & 0xffffe0ff);
-+}
-+
-+
-+static void radeon_legacy_tv_write_fifo(struct radeon_encoder *radeon_encoder,
-+ uint16_t addr, uint32_t value)
-+{
-+ struct drm_device *dev = radeon_encoder->base.dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ uint32_t tmp;
-+ int i = 0;
-+
-+ WREG32(RADEON_TV_HOST_WRITE_DATA, value);
-+
-+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
-+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_WT);
-+
-+ do {
-+ tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
-+ if ((tmp & RADEON_HOST_FIFO_WT_ACK) == 0)
-+ break;
-+ i++;
-+ } while (i < 10000);
-+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
-+}
-+
-+#if 0 /* included for completeness */
-+static uint32_t radeon_legacy_tv_read_fifo(struct radeon_encoder *radeon_encoder, uint16_t addr)
-+{
-+ struct drm_device *dev = radeon_encoder->base.dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ uint32_t tmp;
-+ int i = 0;
-+
-+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
-+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_RD);
-+
-+ do {
-+ tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
-+ if ((tmp & RADEON_HOST_FIFO_RD_ACK) == 0)
-+ break;
-+ i++;
-+ } while (i < 10000);
-+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
-+ return RREG32(RADEON_TV_HOST_READ_DATA);
-+}
-+#endif
-+
-+static uint16_t radeon_get_htiming_tables_addr(uint32_t tv_uv_adr)
-+{
-+ uint16_t h_table;
-+
-+ switch ((tv_uv_adr & RADEON_HCODE_TABLE_SEL_MASK) >> RADEON_HCODE_TABLE_SEL_SHIFT) {
-+ case 0:
-+ h_table = RADEON_TV_MAX_FIFO_ADDR_INTERNAL;
-+ break;
-+ case 1:
-+ h_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2;
-+ break;
-+ case 2:
-+ h_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2;
-+ break;
-+ default:
-+ h_table = 0;
-+ break;
-+ }
-+ return h_table;
-+}
-+
-+static uint16_t radeon_get_vtiming_tables_addr(uint32_t tv_uv_adr)
-+{
-+ uint16_t v_table;
-+
-+ switch ((tv_uv_adr & RADEON_VCODE_TABLE_SEL_MASK) >> RADEON_VCODE_TABLE_SEL_SHIFT) {
-+ case 0:
-+ v_table = ((tv_uv_adr & RADEON_MAX_UV_ADR_MASK) >> RADEON_MAX_UV_ADR_SHIFT) * 2 + 1;
-+ break;
-+ case 1:
-+ v_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2 + 1;
-+ break;
-+ case 2:
-+ v_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2 + 1;
-+ break;
-+ default:
-+ v_table = 0;
-+ break;
-+ }
-+ return v_table;
-+}
-+
-+static void radeon_restore_tv_timing_tables(struct radeon_encoder *radeon_encoder)
-+{
-+ struct drm_device *dev = radeon_encoder->base.dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-+ uint16_t h_table, v_table;
-+ uint32_t tmp;
-+ int i;
-+
-+ WREG32(RADEON_TV_UV_ADR, tv_dac->tv.tv_uv_adr);
-+ h_table = radeon_get_htiming_tables_addr(tv_dac->tv.tv_uv_adr);
-+ v_table = radeon_get_vtiming_tables_addr(tv_dac->tv.tv_uv_adr);
-+
-+ for (i = 0; i < MAX_H_CODE_TIMING_LEN; i += 2, h_table--) {
-+ tmp = ((uint32_t)tv_dac->tv.h_code_timing[i] << 14) | ((uint32_t)tv_dac->tv.h_code_timing[i+1]);
-+ radeon_legacy_tv_write_fifo(radeon_encoder, h_table, tmp);
-+ if (tv_dac->tv.h_code_timing[i] == 0 || tv_dac->tv.h_code_timing[i + 1] == 0)
-+ break;
-+ }
-+ for (i = 0; i < MAX_V_CODE_TIMING_LEN; i += 2, v_table++) {
-+ tmp = ((uint32_t)tv_dac->tv.v_code_timing[i+1] << 14) | ((uint32_t)tv_dac->tv.v_code_timing[i]);
-+ radeon_legacy_tv_write_fifo(radeon_encoder, v_table, tmp);
-+ if (tv_dac->tv.v_code_timing[i] == 0 || tv_dac->tv.v_code_timing[i + 1] == 0)
-+ break;
-+ }
-+}
-+
-+static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encoder)
-+{
-+ struct drm_device *dev = radeon_encoder->base.dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-+ WREG32(RADEON_TV_FRESTART, tv_dac->tv.frestart);
-+ WREG32(RADEON_TV_HRESTART, tv_dac->tv.hrestart);
-+ WREG32(RADEON_TV_VRESTART, tv_dac->tv.vrestart);
-+}
-+
-+static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
-+{
-+ struct drm_device *dev = encoder->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-+ struct radeon_crtc *radeon_crtc;
-+ int restart;
-+ unsigned int h_total, v_total, f_total;
-+ int v_offset, h_offset;
-+ u16 p1, p2, h_inc;
-+ bool h_changed;
-+ const struct radeon_tv_mode_constants *const_ptr;
-+ struct radeon_pll *pll;
-+
-+ radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
-+ if (radeon_crtc->crtc_id == 1)
-+ pll = &rdev->clock.p2pll;
-+ else
-+ pll = &rdev->clock.p1pll;
-+
-+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
-+ if (!const_ptr)
-+ return false;
-+
-+ h_total = const_ptr->hor_total;
-+ v_total = const_ptr->ver_total;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M ||
-+ tv_dac->tv_std == TV_STD_PAL_60)
-+ f_total = NTSC_TV_VFTOTAL + 1;
-+ else
-+ f_total = PAL_TV_VFTOTAL + 1;
-+
-+	/* adjust positions 1&2 in hor. code timing table */
-+ h_offset = tv_dac->h_pos * H_POS_UNIT;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M) {
-+ h_offset -= 50;
-+ p1 = hor_timing_NTSC[H_TABLE_POS1];
-+ p2 = hor_timing_NTSC[H_TABLE_POS2];
-+ } else {
-+ p1 = hor_timing_PAL[H_TABLE_POS1];
-+ p2 = hor_timing_PAL[H_TABLE_POS2];
-+ }
-+
-+ p1 = (u16)((int)p1 + h_offset);
-+ p2 = (u16)((int)p2 - h_offset);
-+
-+ h_changed = (p1 != tv_dac->tv.h_code_timing[H_TABLE_POS1] ||
-+ p2 != tv_dac->tv.h_code_timing[H_TABLE_POS2]);
-+
-+ tv_dac->tv.h_code_timing[H_TABLE_POS1] = p1;
-+ tv_dac->tv.h_code_timing[H_TABLE_POS2] = p2;
-+
-+ /* Convert hOffset from n. of TV clock periods to n. of CRTC clock periods (CRTC pixels) */
-+ h_offset = (h_offset * (int)(const_ptr->pix_to_tv)) / 1000;
-+
-+ /* adjust restart */
-+ restart = const_ptr->def_restart;
-+
-+ /*
-+ * convert v_pos TV lines to n. of CRTC pixels
-+ */
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M ||
-+ tv_dac->tv_std == TV_STD_PAL_60)
-+ v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(NTSC_TV_LINES_PER_FRAME);
-+ else
-+ v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(PAL_TV_LINES_PER_FRAME);
-+
-+ restart -= v_offset + h_offset;
-+
-+ DRM_DEBUG("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
-+ const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
-+
-+ tv_dac->tv.hrestart = restart % h_total;
-+ restart /= h_total;
-+ tv_dac->tv.vrestart = restart % v_total;
-+ restart /= v_total;
-+ tv_dac->tv.frestart = restart % f_total;
-+
-+ DRM_DEBUG("compute_restart: F/H/V=%u,%u,%u\n",
-+ (unsigned)tv_dac->tv.frestart,
-+ (unsigned)tv_dac->tv.vrestart,
-+ (unsigned)tv_dac->tv.hrestart);
-+
-+ /* compute h_inc from hsize */
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M)
-+ h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * NTSC_TV_CLOCK_T) /
-+ (tv_dac->h_size * (int)(NTSC_TV_H_SIZE_UNIT) + (int)(NTSC_TV_ZERO_H_SIZE)));
-+ else
-+ h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * PAL_TV_CLOCK_T) /
-+ (tv_dac->h_size * (int)(PAL_TV_H_SIZE_UNIT) + (int)(PAL_TV_ZERO_H_SIZE)));
-+
-+ tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
-+ ((u32)h_inc << RADEON_H_INC_SHIFT);
-+
-+ DRM_DEBUG("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
-+
-+ return h_changed;
-+}
-+
-+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ struct drm_device *dev = encoder->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
-+ const struct radeon_tv_mode_constants *const_ptr;
-+ struct radeon_crtc *radeon_crtc;
-+ int i;
-+ uint16_t pll_ref_freq;
-+ uint32_t vert_space, flicker_removal, tmp;
-+ uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
-+ uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
-+ uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
-+ uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
-+ uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
-+ uint32_t m, n, p;
-+ const uint16_t *hor_timing;
-+ const uint16_t *vert_timing;
-+
-+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, &pll_ref_freq);
-+ if (!const_ptr)
-+ return;
-+
-+ radeon_crtc = to_radeon_crtc(encoder->crtc);
-+
-+ tv_master_cntl = (RADEON_VIN_ASYNC_RST |
-+ RADEON_CRT_FIFO_CE_EN |
-+ RADEON_TV_FIFO_CE_EN |
-+ RADEON_TV_ON);
-+
-+ if (!ASIC_IS_R300(rdev))
-+ tv_master_cntl |= RADEON_TVCLK_ALWAYS_ONb;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J)
-+ tv_master_cntl |= RADEON_RESTART_PHASE_FIX;
-+
-+ tv_modulator_cntl1 = (RADEON_SLEW_RATE_LIMIT |
-+ RADEON_SYNC_TIP_LEVEL |
-+ RADEON_YFLT_EN |
-+ RADEON_UVFLT_EN |
-+ (6 << RADEON_CY_FILT_BLEND_SHIFT));
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J) {
-+ tv_modulator_cntl1 |= (0x46 << RADEON_SET_UP_LEVEL_SHIFT) |
-+ (0x3b << RADEON_BLANK_LEVEL_SHIFT);
-+ tv_modulator_cntl2 = (-111 & RADEON_TV_U_BURST_LEVEL_MASK) |
-+ ((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
-+ } else if (tv_dac->tv_std == TV_STD_SCART_PAL) {
-+ tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN;
-+ tv_modulator_cntl2 = (0 & RADEON_TV_U_BURST_LEVEL_MASK) |
-+ ((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
-+ } else {
-+ tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN |
-+ (0x3b << RADEON_SET_UP_LEVEL_SHIFT) |
-+ (0x3b << RADEON_BLANK_LEVEL_SHIFT);
-+ tv_modulator_cntl2 = (-78 & RADEON_TV_U_BURST_LEVEL_MASK) |
-+ ((62 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
-+ }
-+
-+
-+ tv_rgb_cntl = (RADEON_RGB_DITHER_EN
-+ | RADEON_TVOUT_SCALE_EN
-+ | (0x0b << RADEON_UVRAM_READ_MARGIN_SHIFT)
-+ | (0x07 << RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT)
-+ | RADEON_RGB_ATTEN_SEL(0x3)
-+ | RADEON_RGB_ATTEN_VAL(0xc));
-+
-+ if (radeon_crtc->crtc_id == 1)
-+ tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC2;
-+ else {
-+ if (radeon_crtc->rmx_type != RMX_OFF)
-+ tv_rgb_cntl |= RADEON_RGB_SRC_SEL_RMX;
-+ else
-+ tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC1;
-+ }
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M ||
-+ tv_dac->tv_std == TV_STD_PAL_60)
-+ vert_space = const_ptr->ver_total * 2 * 10000 / NTSC_TV_LINES_PER_FRAME;
-+ else
-+ vert_space = const_ptr->ver_total * 2 * 10000 / PAL_TV_LINES_PER_FRAME;
-+
-+ tmp = RREG32(RADEON_TV_VSCALER_CNTL1);
-+ tmp &= 0xe3ff0000;
-+ tmp |= (vert_space * (1 << FRAC_BITS) / 10000);
-+ tv_vscaler_cntl1 = tmp;
-+
-+ if (pll_ref_freq == 2700)
-+ tv_vscaler_cntl1 |= RADEON_RESTART_FIELD;
-+
-+ if (const_ptr->hor_resolution == 1024)
-+ tv_vscaler_cntl1 |= (4 << RADEON_Y_DEL_W_SIG_SHIFT);
-+ else
-+ tv_vscaler_cntl1 |= (2 << RADEON_Y_DEL_W_SIG_SHIFT);
-+
-+ /* scale up for int divide */
-+ tmp = const_ptr->ver_total * 2 * 1000;
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M ||
-+ tv_dac->tv_std == TV_STD_PAL_60) {
-+ tmp /= NTSC_TV_LINES_PER_FRAME;
-+ } else {
-+ tmp /= PAL_TV_LINES_PER_FRAME;
-+ }
-+ flicker_removal = (tmp + 500) / 1000;
-+
-+ if (flicker_removal < 3)
-+ flicker_removal = 3;
-+ for (i = 0; i < 6; ++i) {
-+ if (flicker_removal == SLOPE_limit[i])
-+ break;
-+ }
-+
-+ tv_y_saw_tooth_cntl = (vert_space * SLOPE_value[i] * (1 << (FRAC_BITS - 1)) +
-+ 5001) / 10000 / 8 | ((SLOPE_value[i] *
-+ (1 << (FRAC_BITS - 1)) / 8) << 16);
-+ tv_y_fall_cntl =
-+ (YCOEF_EN_value[i] << 17) | ((YCOEF_value[i] * (1 << 8) / 8) << 24) |
-+ RADEON_Y_FALL_PING_PONG | (272 * SLOPE_value[i] / 8) * (1 << (FRAC_BITS - 1)) /
-+ 1024;
-+ tv_y_rise_cntl = RADEON_Y_RISE_PING_PONG|
-+ (flicker_removal * 1024 - 272) * SLOPE_value[i] / 8 * (1 << (FRAC_BITS - 1)) / 1024;
-+
-+ tv_vscaler_cntl2 = RREG32(RADEON_TV_VSCALER_CNTL2) & 0x00fffff0;
-+ tv_vscaler_cntl2 |= (0x10 << 24) |
-+ RADEON_DITHER_MODE |
-+ RADEON_Y_OUTPUT_DITHER_EN |
-+ RADEON_UV_OUTPUT_DITHER_EN |
-+ RADEON_UV_TO_BUF_DITHER_EN;
-+
-+ tmp = (tv_vscaler_cntl1 >> RADEON_UV_INC_SHIFT) & RADEON_UV_INC_MASK;
-+ tmp = ((16384 * 256 * 10) / tmp + 5) / 10;
-+ tmp = (tmp << RADEON_UV_OUTPUT_POST_SCALE_SHIFT) | 0x000b0000;
-+ tv_dac->tv.timing_cntl = tmp;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M ||
-+ tv_dac->tv_std == TV_STD_PAL_60)
-+ tv_dac_cntl = tv_dac->ntsc_tvdac_adj;
-+ else
-+ tv_dac_cntl = tv_dac->pal_tvdac_adj;
-+
-+ tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J)
-+ tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
-+ else
-+ tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J) {
-+ if (pll_ref_freq == 2700) {
-+ m = NTSC_TV_PLL_M_27;
-+ n = NTSC_TV_PLL_N_27;
-+ p = NTSC_TV_PLL_P_27;
-+ } else {
-+ m = NTSC_TV_PLL_M_14;
-+ n = NTSC_TV_PLL_N_14;
-+ p = NTSC_TV_PLL_P_14;
-+ }
-+ } else {
-+ if (pll_ref_freq == 2700) {
-+ m = PAL_TV_PLL_M_27;
-+ n = PAL_TV_PLL_N_27;
-+ p = PAL_TV_PLL_P_27;
-+ } else {
-+ m = PAL_TV_PLL_M_27;
-+ n = PAL_TV_PLL_N_27;
-+ p = PAL_TV_PLL_P_27;
-+ }
-+ }
-+
-+ tv_pll_cntl = (m & RADEON_TV_M0LO_MASK) |
-+ (((m >> 8) & RADEON_TV_M0HI_MASK) << RADEON_TV_M0HI_SHIFT) |
-+ ((n & RADEON_TV_N0LO_MASK) << RADEON_TV_N0LO_SHIFT) |
-+ (((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
-+ ((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
-+
-+ tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
-+ ((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
-+ ((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
-+ RADEON_TVCLK_SRC_SEL_TVPLL |
-+ RADEON_TVPLL_TEST_DIS);
-+
-+ tv_dac->tv.tv_uv_adr = 0xc8;
-+
-+ if (tv_dac->tv_std == TV_STD_NTSC ||
-+ tv_dac->tv_std == TV_STD_NTSC_J ||
-+ tv_dac->tv_std == TV_STD_PAL_M ||
-+ tv_dac->tv_std == TV_STD_PAL_60) {
-+ tv_ftotal = NTSC_TV_VFTOTAL;
-+ hor_timing = hor_timing_NTSC;
-+ vert_timing = vert_timing_NTSC;
-+ } else {
-+ hor_timing = hor_timing_PAL;
-+ vert_timing = vert_timing_PAL;
-+ tv_ftotal = PAL_TV_VFTOTAL;
-+ }
-+
-+ for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) {
-+ if ((tv_dac->tv.h_code_timing[i] = hor_timing[i]) == 0)
-+ break;
-+ }
-+
-+ for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) {
-+ if ((tv_dac->tv.v_code_timing[i] = vert_timing[i]) == 0)
-+ break;
-+ }
-+
-+ radeon_legacy_tv_init_restarts(encoder);
-+
-+ /* play with DAC_CNTL */
-+ /* play with GPIOPAD_A */
-+ /* DISP_OUTPUT_CNTL */
-+ /* use reference freq */
-+
-+ /* program the TV registers */
-+ WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
-+ RADEON_CRT_ASYNC_RST | RADEON_TV_FIFO_ASYNC_RST));
-+
-+ tmp = RREG32(RADEON_TV_DAC_CNTL);
-+ tmp &= ~RADEON_TV_DAC_NBLANK;
-+ tmp |= RADEON_TV_DAC_BGSLEEP |
-+ RADEON_TV_DAC_RDACPD |
-+ RADEON_TV_DAC_GDACPD |
-+ RADEON_TV_DAC_BDACPD;
-+ WREG32(RADEON_TV_DAC_CNTL, tmp);
-+
-+ /* TV PLL */
-+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVCLK_SRC_SEL_TVPLL);
-+ WREG32_PLL(RADEON_TV_PLL_CNTL, tv_pll_cntl);
-+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVPLL_RESET, ~RADEON_TVPLL_RESET);
-+
-+ radeon_wait_pll_lock(encoder, 200, 800, 135);
-+
-+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_RESET);
-+
-+ radeon_wait_pll_lock(encoder, 300, 160, 27);
-+ radeon_wait_pll_lock(encoder, 200, 800, 135);
-+
-+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~0xf);
-+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVCLK_SRC_SEL_TVPLL, ~RADEON_TVCLK_SRC_SEL_TVPLL);
-+
-+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, (1 << RADEON_TVPDC_SHIFT), ~RADEON_TVPDC_MASK);
-+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_SLEEP);
-+
-+ /* TV HV */
-+ WREG32(RADEON_TV_RGB_CNTL, tv_rgb_cntl);
-+ WREG32(RADEON_TV_HTOTAL, const_ptr->hor_total - 1);
-+ WREG32(RADEON_TV_HDISP, const_ptr->hor_resolution - 1);
-+ WREG32(RADEON_TV_HSTART, const_ptr->hor_start);
-+
-+ WREG32(RADEON_TV_VTOTAL, const_ptr->ver_total - 1);
-+ WREG32(RADEON_TV_VDISP, const_ptr->ver_resolution - 1);
-+ WREG32(RADEON_TV_FTOTAL, tv_ftotal);
-+ WREG32(RADEON_TV_VSCALER_CNTL1, tv_vscaler_cntl1);
-+ WREG32(RADEON_TV_VSCALER_CNTL2, tv_vscaler_cntl2);
-+
-+ WREG32(RADEON_TV_Y_FALL_CNTL, tv_y_fall_cntl);
-+ WREG32(RADEON_TV_Y_RISE_CNTL, tv_y_rise_cntl);
-+ WREG32(RADEON_TV_Y_SAW_TOOTH_CNTL, tv_y_saw_tooth_cntl);
-+
-+ WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
-+ RADEON_CRT_ASYNC_RST));
-+
-+ /* TV restarts */
-+ radeon_legacy_write_tv_restarts(radeon_encoder);
-+
-+ /* tv timings */
-+ radeon_restore_tv_timing_tables(radeon_encoder);
-+
-+ WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST));
-+
-+ /* tv std */
-+ WREG32(RADEON_TV_SYNC_CNTL, (RADEON_SYNC_PUB | RADEON_TV_SYNC_IO_DRIVE));
-+ WREG32(RADEON_TV_TIMING_CNTL, tv_dac->tv.timing_cntl);
-+ WREG32(RADEON_TV_MODULATOR_CNTL1, tv_modulator_cntl1);
-+ WREG32(RADEON_TV_MODULATOR_CNTL2, tv_modulator_cntl2);
-+ WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, (RADEON_Y_RED_EN |
-+ RADEON_C_GRN_EN |
-+ RADEON_CMP_BLU_EN |
-+ RADEON_DAC_DITHER_EN));
-+
-+ WREG32(RADEON_TV_CRC_CNTL, 0);
-+
-+ WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
-+
-+ WREG32(RADEON_TV_GAIN_LIMIT_SETTINGS, ((0x17f << RADEON_UV_GAIN_LIMIT_SHIFT) |
-+ (0x5ff << RADEON_Y_GAIN_LIMIT_SHIFT)));
-+ WREG32(RADEON_TV_LINEAR_GAIN_SETTINGS, ((0x100 << RADEON_UV_GAIN_SHIFT) |
-+ (0x100 << RADEON_Y_GAIN_SHIFT)));
-+
-+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
-+
-+}
-+
-+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
-+ uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
-+ uint32_t *v_total_disp, uint32_t *v_sync_strt_wid)
-+{
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ const struct radeon_tv_mode_constants *const_ptr;
-+ uint32_t tmp;
-+
-+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
-+ if (!const_ptr)
-+ return;
-+
-+ *h_total_disp = (((const_ptr->hor_resolution / 8) - 1) << RADEON_CRTC_H_DISP_SHIFT) |
-+ (((const_ptr->hor_total / 8) - 1) << RADEON_CRTC_H_TOTAL_SHIFT);
-+
-+ tmp = *h_sync_strt_wid;
-+ tmp &= ~(RADEON_CRTC_H_SYNC_STRT_PIX | RADEON_CRTC_H_SYNC_STRT_CHAR);
-+ tmp |= (((const_ptr->hor_syncstart / 8) - 1) << RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT) |
-+ (const_ptr->hor_syncstart & 7);
-+ *h_sync_strt_wid = tmp;
-+
-+ *v_total_disp = ((const_ptr->ver_resolution - 1) << RADEON_CRTC_V_DISP_SHIFT) |
-+ ((const_ptr->ver_total - 1) << RADEON_CRTC_V_TOTAL_SHIFT);
-+
-+ tmp = *v_sync_strt_wid;
-+ tmp &= ~RADEON_CRTC_V_SYNC_STRT;
-+ tmp |= ((const_ptr->ver_syncstart - 1) << RADEON_CRTC_V_SYNC_STRT_SHIFT);
-+ *v_sync_strt_wid = tmp;
-+}
-+
-+static inline int get_post_div(int value)
-+{
-+ int post_div;
-+ switch (value) {
-+ case 1: post_div = 0; break;
-+ case 2: post_div = 1; break;
-+ case 3: post_div = 4; break;
-+ case 4: post_div = 2; break;
-+ case 6: post_div = 6; break;
-+ case 8: post_div = 3; break;
-+ case 12: post_div = 7; break;
-+ case 16:
-+ default: post_div = 5; break;
-+ }
-+ return post_div;
-+}
-+
-+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
-+ uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
-+ uint32_t *ppll_div_3, uint32_t *pixclks_cntl)
-+{
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ const struct radeon_tv_mode_constants *const_ptr;
-+
-+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
-+ if (!const_ptr)
-+ return;
-+
-+ *htotal_cntl = (const_ptr->hor_total & 0x7) | RADEON_HTOT_CNTL_VGA_EN;
-+
-+ *ppll_ref_div = const_ptr->crtcPLL_M;
-+
-+ *ppll_div_3 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
-+ *pixclks_cntl &= ~(RADEON_PIX2CLK_SRC_SEL_MASK | RADEON_PIXCLK_TV_SRC_SEL);
-+ *pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK;
-+}
-+
-+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
-+ uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
-+ uint32_t *p2pll_div_0, uint32_t *pixclks_cntl)
-+{
-+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-+ const struct radeon_tv_mode_constants *const_ptr;
-+
-+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
-+ if (!const_ptr)
-+ return;
-+
-+ *htotal2_cntl = (const_ptr->hor_total & 0x7);
-+
-+ *p2pll_ref_div = const_ptr->crtcPLL_M;
-+
-+ *p2pll_div_0 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
-+ *pixclks_cntl &= ~RADEON_PIX2CLK_SRC_SEL_MASK;
-+ *pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK | RADEON_PIXCLK_TV_SRC_SEL;
-+}
-+
-diff --git a/drivers/gpu/drm/radeon/radeon_microcode.h b/drivers/gpu/drm/radeon/radeon_microcode.h
-deleted file mode 100644
-index a348c9e..0000000
---- a/drivers/gpu/drm/radeon/radeon_microcode.h
-+++ /dev/null
-@@ -1,33 +0,0 @@
--/*
-- * Copyright 2007 Advanced Micro Devices, Inc.
-- * All Rights Reserved.
-- *
-- * Permission is hereby granted, free of charge, to any person obtaining a
-- * copy of this software and associated documentation files (the "Software"),
-- * to deal in the Software without restriction, including without limitation
-- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- * and/or sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following conditions:
-- *
-- * The above copyright notice and this permission notice (including the next
-- * paragraph) shall be included in all copies or substantial portions of the
-- * Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-- *
-- */
--
--#ifndef RADEON_MICROCODE_H
--#define RADEON_MICROCODE_H
--
--/* production radeon ucode r1xx-r6xx */
--/*(DEBLOBBED)*/
--
--
--#endif
-diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
-index 3b09a1f..523d6cb 100644
---- a/drivers/gpu/drm/radeon/radeon_mode.h
-+++ b/drivers/gpu/drm/radeon/radeon_mode.h
-@@ -188,6 +188,21 @@ struct radeon_native_mode {
- uint32_t flags;
- };
-
-+#define MAX_H_CODE_TIMING_LEN 32
-+#define MAX_V_CODE_TIMING_LEN 32
-+
-+/* need to store these as reading
-+ back code tables is excessive */
-+struct radeon_tv_regs {
-+ uint32_t tv_uv_adr;
-+ uint32_t timing_cntl;
-+ uint32_t hrestart;
-+ uint32_t vrestart;
-+ uint32_t frestart;
-+ uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN];
-+ uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
-+};
-+
- struct radeon_crtc {
- struct drm_crtc base;
- int crtc_id;
-@@ -195,8 +210,6 @@ struct radeon_crtc {
- bool enabled;
- bool can_tile;
- uint32_t crtc_offset;
-- struct radeon_framebuffer *fbdev_fb;
-- struct drm_mode_set mode_set;
- struct drm_gem_object *cursor_bo;
- uint64_t cursor_addr;
- int cursor_width;
-@@ -204,7 +217,6 @@ struct radeon_crtc {
- uint32_t legacy_display_base_addr;
- uint32_t legacy_cursor_offset;
- enum radeon_rmx_type rmx_type;
-- uint32_t devices;
- fixed20_12 vsc;
- fixed20_12 hsc;
- struct radeon_native_mode native_mode;
-@@ -236,7 +248,13 @@ struct radeon_encoder_tv_dac {
- uint32_t ntsc_tvdac_adj;
- uint32_t pal_tvdac_adj;
-
-+ int h_pos;
-+ int v_pos;
-+ int h_size;
-+ int supported_tv_stds;
-+ bool tv_on;
- enum radeon_tv_std tv_std;
-+ struct radeon_tv_regs tv;
- };
-
- struct radeon_encoder_int_tmds {
-@@ -255,10 +273,15 @@ struct radeon_encoder_atom_dig {
- struct radeon_native_mode native_mode;
- };
-
-+struct radeon_encoder_atom_dac {
-+ enum radeon_tv_std tv_std;
-+};
-+
- struct radeon_encoder {
- struct drm_encoder base;
- uint32_t encoder_id;
- uint32_t devices;
-+ uint32_t active_device;
- uint32_t flags;
- uint32_t pixel_clock;
- enum radeon_rmx_type rmx_type;
-@@ -276,7 +299,10 @@ struct radeon_connector {
- uint32_t connector_id;
- uint32_t devices;
- struct radeon_i2c_chan *ddc_bus;
-- int use_digital;
-+ bool use_digital;
-+	/* we need to keep the EDID between detect and
-+	   get_modes because of analog/digital/tvencoder */
-+ struct edid *edid;
- void *con_priv;
- };
-
-@@ -310,6 +336,7 @@ struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, i
- struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
- extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
- extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
-+extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
-
- extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
- extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
-@@ -396,6 +423,19 @@ extern int radeon_static_clocks_init(struct drm_device *dev);
- bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
--void atom_rv515_force_tv_scaler(struct radeon_device *rdev);
--
-+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
-+
-+/* legacy tv */
-+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
-+ uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
-+ uint32_t *v_total_disp, uint32_t *v_sync_strt_wid);
-+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
-+ uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
-+ uint32_t *ppll_div_3, uint32_t *pixclks_cntl);
-+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
-+ uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
-+ uint32_t *p2pll_div_0, uint32_t *pixclks_cntl);
-+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode);
- #endif
-diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
-index 473e477..10e8af6 100644
---- a/drivers/gpu/drm/radeon/radeon_object.h
-+++ b/drivers/gpu/drm/radeon/radeon_object.h
-@@ -37,6 +37,7 @@
- * TTM.
- */
- struct radeon_mman {
-+ struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
- bool mem_global_referenced;
- struct ttm_bo_device bdev;
-diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
-index 4df43f6..21da871 100644
---- a/drivers/gpu/drm/radeon/radeon_reg.h
-+++ b/drivers/gpu/drm/radeon/radeon_reg.h
-@@ -1945,6 +1945,11 @@
- # define RADEON_TXFORMAT_DXT1 (12 << 0)
- # define RADEON_TXFORMAT_DXT23 (14 << 0)
- # define RADEON_TXFORMAT_DXT45 (15 << 0)
-+# define RADEON_TXFORMAT_SHADOW16 (16 << 0)
-+# define RADEON_TXFORMAT_SHADOW32 (17 << 0)
-+# define RADEON_TXFORMAT_DUDV88 (18 << 0)
-+# define RADEON_TXFORMAT_LDUDV655 (19 << 0)
-+# define RADEON_TXFORMAT_LDUDUV8888 (20 << 0)
- # define RADEON_TXFORMAT_FORMAT_MASK (31 << 0)
- # define RADEON_TXFORMAT_FORMAT_SHIFT 0
- # define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5)
-@@ -2203,7 +2208,7 @@
- # define RADEON_ROP_ENABLE (1 << 6)
- # define RADEON_STENCIL_ENABLE (1 << 7)
- # define RADEON_Z_ENABLE (1 << 8)
--# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9)
-+# define RADEON_DEPTHXY_OFFSET_ENABLE (1 << 9)
- # define RADEON_RB3D_COLOR_FORMAT_SHIFT 10
-
- # define RADEON_COLOR_FORMAT_ARGB1555 3
-@@ -2773,7 +2778,12 @@
- # define R200_TXFORMAT_DXT1 (12 << 0)
- # define R200_TXFORMAT_DXT23 (14 << 0)
- # define R200_TXFORMAT_DXT45 (15 << 0)
-+# define R200_TXFORMAT_DVDU88 (18 << 0)
-+# define R200_TXFORMAT_LDVDU655 (19 << 0)
-+# define R200_TXFORMAT_LDVDU8888 (20 << 0)
-+# define R200_TXFORMAT_GR1616 (21 << 0)
- # define R200_TXFORMAT_ABGR8888 (22 << 0)
-+# define R200_TXFORMAT_BGR111110 (23 << 0)
- # define R200_TXFORMAT_FORMAT_MASK (31 << 0)
- # define R200_TXFORMAT_FORMAT_SHIFT 0
- # define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6)
-@@ -2818,6 +2828,13 @@
- #define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */
- #define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */
-
-+#define R200_PP_CUBIC_FACES_0 0x2c18
-+#define R200_PP_CUBIC_FACES_1 0x2c38
-+#define R200_PP_CUBIC_FACES_2 0x2c58
-+#define R200_PP_CUBIC_FACES_3 0x2c78
-+#define R200_PP_CUBIC_FACES_4 0x2c98
-+#define R200_PP_CUBIC_FACES_5 0x2cb8
-+
- #define R200_PP_TXOFFSET_0 0x2d00
- # define R200_TXO_ENDIAN_NO_SWAP (0 << 0)
- # define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0)
-@@ -2829,11 +2846,44 @@
- # define R200_TXO_MICRO_TILE (1 << 3)
- # define R200_TXO_OFFSET_MASK 0xffffffe0
- # define R200_TXO_OFFSET_SHIFT 5
-+#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04
-+#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08
-+#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c
-+#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10
-+#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14
-+
- #define R200_PP_TXOFFSET_1 0x2d18
-+#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c
-+#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20
-+#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24
-+#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28
-+#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c
-+
- #define R200_PP_TXOFFSET_2 0x2d30
-+#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34
-+#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38
-+#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c
-+#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40
-+#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44
-+
- #define R200_PP_TXOFFSET_3 0x2d48
-+#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c
-+#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50
-+#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54
-+#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58
-+#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c
- #define R200_PP_TXOFFSET_4 0x2d60
-+#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64
-+#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68
-+#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c
-+#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70
-+#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74
- #define R200_PP_TXOFFSET_5 0x2d78
-+#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c
-+#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80
-+#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84
-+#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88
-+#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c
-
- #define R200_PP_TFACTOR_0 0x2ee0
- #define R200_PP_TFACTOR_1 0x2ee4
-@@ -3175,6 +3225,11 @@
- # define R200_FORCE_INORDER_PROC (1<<31)
- #define R200_PP_CNTL_X 0x2cc4
- #define R200_PP_TXMULTI_CTL_0 0x2c1c
-+#define R200_PP_TXMULTI_CTL_1 0x2c3c
-+#define R200_PP_TXMULTI_CTL_2 0x2c5c
-+#define R200_PP_TXMULTI_CTL_3 0x2c7c
-+#define R200_PP_TXMULTI_CTL_4 0x2c9c
-+#define R200_PP_TXMULTI_CTL_5 0x2cbc
- #define R200_SE_VTX_STATE_CNTL 0x2180
- # define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
-
-@@ -3200,6 +3255,24 @@
- #define RADEON_CP_RB_WPTR 0x0714
- #define RADEON_CP_RB_RPTR_WR 0x071c
-
-+#define RADEON_SCRATCH_UMSK 0x0770
-+#define RADEON_SCRATCH_ADDR 0x0774
-+
-+#define R600_CP_RB_BASE 0xc100
-+#define R600_CP_RB_CNTL 0xc104
-+# define R600_RB_BUFSZ(x) ((x) << 0)
-+# define R600_RB_BLKSZ(x) ((x) << 8)
-+# define R600_RB_NO_UPDATE (1 << 27)
-+# define R600_RB_RPTR_WR_ENA (1 << 31)
-+#define R600_CP_RB_RPTR_WR 0xc108
-+#define R600_CP_RB_RPTR_ADDR 0xc10c
-+#define R600_CP_RB_RPTR_ADDR_HI 0xc110
-+#define R600_CP_RB_WPTR 0xc114
-+#define R600_CP_RB_WPTR_ADDR 0xc118
-+#define R600_CP_RB_WPTR_ADDR_HI 0xc11c
-+#define R600_CP_RB_RPTR 0x8700
-+#define R600_CP_RB_WPTR_DELAY 0x8704
-+
- #define RADEON_CP_IB_BASE 0x0738
- #define RADEON_CP_IB_BUFSZ 0x073c
-
-@@ -3407,7 +3480,9 @@
- # define RADEON_RGB_CONVERT_BY_PASS (1 << 10)
- # define RADEON_UVRAM_READ_MARGIN_SHIFT 16
- # define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20
--# define RADEON_TVOUT_SCALE_EN (1 << 26)
-+# define RADEON_RGB_ATTEN_SEL(x) ((x) << 24)
-+# define RADEON_TVOUT_SCALE_EN (1 << 26)
-+# define RADEON_RGB_ATTEN_VAL(x) ((x) << 28)
- #define RADEON_TV_SYNC_CNTL 0x0808
- # define RADEON_SYNC_OE (1 << 0)
- # define RADEON_SYNC_OUT (1 << 1)
-diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
-index 60d1593..aa9837a 100644
---- a/drivers/gpu/drm/radeon/radeon_ring.c
-+++ b/drivers/gpu/drm/radeon/radeon_ring.c
-@@ -110,7 +110,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
- return;
- }
- list_del(&tmp->list);
-- INIT_LIST_HEAD(&tmp->list);
- if (tmp->fence) {
- radeon_fence_unref(&tmp->fence);
- }
-@@ -119,19 +118,11 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
- mutex_unlock(&rdev->ib_pool.mutex);
- }
-
--static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
--{
-- while ((ib->length_dw & rdev->cp.align_mask)) {
-- ib->ptr[ib->length_dw++] = PACKET2(0);
-- }
--}
--
- int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
- {
- int r = 0;
-
- mutex_lock(&rdev->ib_pool.mutex);
-- radeon_ib_align(rdev, ib);
- if (!ib->length_dw || !rdev->cp.ready) {
- /* TODO: Nothings in the ib we should report. */
- mutex_unlock(&rdev->ib_pool.mutex);
-@@ -145,9 +136,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
- mutex_unlock(&rdev->ib_pool.mutex);
- return r;
- }
-- radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-- radeon_ring_write(rdev, ib->gpu_addr);
-- radeon_ring_write(rdev, ib->length_dw);
-+ radeon_ring_ib_execute(rdev, ib);
- radeon_fence_emit(rdev, ib->fence);
- radeon_ring_unlock_commit(rdev);
- list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
-@@ -215,69 +204,16 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
- mutex_unlock(&rdev->ib_pool.mutex);
- }
-
--int radeon_ib_test(struct radeon_device *rdev)
--{
-- struct radeon_ib *ib;
-- uint32_t scratch;
-- uint32_t tmp = 0;
-- unsigned i;
-- int r;
--
-- r = radeon_scratch_get(rdev, &scratch);
-- if (r) {
-- DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
-- return r;
-- }
-- WREG32(scratch, 0xCAFEDEAD);
-- r = radeon_ib_get(rdev, &ib);
-- if (r) {
-- return r;
-- }
-- ib->ptr[0] = PACKET0(scratch, 0);
-- ib->ptr[1] = 0xDEADBEEF;
-- ib->ptr[2] = PACKET2(0);
-- ib->ptr[3] = PACKET2(0);
-- ib->ptr[4] = PACKET2(0);
-- ib->ptr[5] = PACKET2(0);
-- ib->ptr[6] = PACKET2(0);
-- ib->ptr[7] = PACKET2(0);
-- ib->length_dw = 8;
-- r = radeon_ib_schedule(rdev, ib);
-- if (r) {
-- radeon_scratch_free(rdev, scratch);
-- radeon_ib_free(rdev, &ib);
-- return r;
-- }
-- r = radeon_fence_wait(ib->fence, false);
-- if (r) {
-- return r;
-- }
-- for (i = 0; i < rdev->usec_timeout; i++) {
-- tmp = RREG32(scratch);
-- if (tmp == 0xDEADBEEF) {
-- break;
-- }
-- DRM_UDELAY(1);
-- }
-- if (i < rdev->usec_timeout) {
-- DRM_INFO("ib test succeeded in %u usecs\n", i);
-- } else {
-- DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
-- scratch, tmp);
-- r = -EINVAL;
-- }
-- radeon_scratch_free(rdev, scratch);
-- radeon_ib_free(rdev, &ib);
-- return r;
--}
--
-
- /*
- * Ring.
- */
- void radeon_ring_free_size(struct radeon_device *rdev)
- {
-- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-+ if (rdev->family >= CHIP_R600)
-+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-+ else
-+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- /* This works because ring_size is a power of 2 */
- rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
- rdev->cp.ring_free_dw -= rdev->cp.wptr;
-@@ -320,11 +256,10 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
- count_dw_pad = (rdev->cp.align_mask + 1) -
- (rdev->cp.wptr & rdev->cp.align_mask);
- for (i = 0; i < count_dw_pad; i++) {
-- radeon_ring_write(rdev, PACKET2(0));
-+ radeon_ring_write(rdev, 2 << 30);
- }
- DRM_MEMORYBARRIER();
-- WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-- (void)RREG32(RADEON_CP_RB_WPTR);
-+ radeon_cp_commit(rdev);
- mutex_unlock(&rdev->cp.mutex);
- }
-
-@@ -334,46 +269,6 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev)
- mutex_unlock(&rdev->cp.mutex);
- }
-
--int radeon_ring_test(struct radeon_device *rdev)
--{
-- uint32_t scratch;
-- uint32_t tmp = 0;
-- unsigned i;
-- int r;
--
-- r = radeon_scratch_get(rdev, &scratch);
-- if (r) {
-- DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
-- return r;
-- }
-- WREG32(scratch, 0xCAFEDEAD);
-- r = radeon_ring_lock(rdev, 2);
-- if (r) {
-- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-- radeon_scratch_free(rdev, scratch);
-- return r;
-- }
-- radeon_ring_write(rdev, PACKET0(scratch, 0));
-- radeon_ring_write(rdev, 0xDEADBEEF);
-- radeon_ring_unlock_commit(rdev);
-- for (i = 0; i < rdev->usec_timeout; i++) {
-- tmp = RREG32(scratch);
-- if (tmp == 0xDEADBEEF) {
-- break;
-- }
-- DRM_UDELAY(1);
-- }
-- if (i < rdev->usec_timeout) {
-- DRM_INFO("ring test succeeded in %d usecs\n", i);
-- } else {
-- DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
-- scratch, tmp);
-- r = -EINVAL;
-- }
-- radeon_scratch_free(rdev, scratch);
-- return r;
--}
--
- int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
- {
- int r;
-diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h
-index 63a7735..5f9e358 100644
---- a/drivers/gpu/drm/radeon/radeon_share.h
-+++ b/drivers/gpu/drm/radeon/radeon_share.h
-@@ -28,12 +28,89 @@
- #ifndef __RADEON_SHARE_H__
- #define __RADEON_SHARE_H__
-
-+/* Common */
-+struct radeon_device;
-+struct radeon_cs_parser;
-+int radeon_clocks_init(struct radeon_device *rdev);
-+void radeon_clocks_fini(struct radeon_device *rdev);
-+void radeon_scratch_init(struct radeon_device *rdev);
-+void radeon_surface_init(struct radeon_device *rdev);
-+int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
-+
-+
-+/* R100, RV100, RS100, RV200, RS200, R200, RV250, RS300, RV280 */
- void r100_vram_init_sizes(struct radeon_device *rdev);
-
-+
-+/* R300, R350, RV350, RV380 */
-+struct r300_asic {
-+ const unsigned *reg_safe_bm;
-+ unsigned reg_safe_bm_size;
-+};
-+
-+
-+/* RS690, RS740 */
- void rs690_line_buffer_adjust(struct radeon_device *rdev,
- struct drm_display_mode *mode1,
- struct drm_display_mode *mode2);
-
-+
-+/* RV515 */
- void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
-
-+
-+/* R600, RV610, RV630, RV620, RV635, RV670, RS780, RS880 */
-+bool r600_card_posted(struct radeon_device *rdev);
-+void r600_cp_stop(struct radeon_device *rdev);
-+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
-+int r600_cp_resume(struct radeon_device *rdev);
-+int r600_count_pipe_bits(uint32_t val);
-+int r600_gart_clear_page(struct radeon_device *rdev, int i);
-+int r600_mc_wait_for_idle(struct radeon_device *rdev);
-+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
-+int r600_ib_test(struct radeon_device *rdev);
-+int r600_ring_test(struct radeon_device *rdev);
-+int r600_wb_init(struct radeon_device *rdev);
-+void r600_wb_fini(struct radeon_device *rdev);
-+void r600_scratch_init(struct radeon_device *rdev);
-+int r600_blit_init(struct radeon_device *rdev);
-+void r600_blit_fini(struct radeon_device *rdev);
-+int r600_cp_init_microcode(struct radeon_device *rdev);
-+struct r600_asic {
-+ unsigned max_pipes;
-+ unsigned max_tile_pipes;
-+ unsigned max_simds;
-+ unsigned max_backends;
-+ unsigned max_gprs;
-+ unsigned max_threads;
-+ unsigned max_stack_entries;
-+ unsigned max_hw_contexts;
-+ unsigned max_gs_threads;
-+ unsigned sx_max_export_size;
-+ unsigned sx_max_export_pos_size;
-+ unsigned sx_max_export_smx_size;
-+ unsigned sq_num_cf_insts;
-+};
-+
-+/* RV770, RV7300, RV710 */
-+struct rv770_asic {
-+ unsigned max_pipes;
-+ unsigned max_tile_pipes;
-+ unsigned max_simds;
-+ unsigned max_backends;
-+ unsigned max_gprs;
-+ unsigned max_threads;
-+ unsigned max_stack_entries;
-+ unsigned max_hw_contexts;
-+ unsigned max_gs_threads;
-+ unsigned sx_max_export_size;
-+ unsigned sx_max_export_pos_size;
-+ unsigned sx_max_export_smx_size;
-+ unsigned sq_num_cf_insts;
-+ unsigned sx_num_of_sets;
-+ unsigned sc_prim_fifo_size;
-+ unsigned sc_hiz_tile_fifo_size;
-+ unsigned sc_earlyz_tile_fifo_fize;
-+};
-+
- #endif
-diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
-index 2882f40..aad0c6f 100644
---- a/drivers/gpu/drm/radeon/radeon_state.c
-+++ b/drivers/gpu/drm/radeon/radeon_state.c
-@@ -1546,7 +1546,7 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev,
- } while (i < nbox);
- }
-
--static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
-+void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
- {
- drm_radeon_private_t *dev_priv = dev->dev_private;
- struct drm_radeon_master_private *master_priv = master->driver_priv;
-@@ -2213,7 +2213,10 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
- if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
-
-- radeon_cp_dispatch_swap(dev, file_priv->master);
-+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
-+ r600_cp_dispatch_swap(dev, file_priv);
-+ else
-+ radeon_cp_dispatch_swap(dev, file_priv->master);
- sarea_priv->ctx_owner = 0;
-
- COMMIT_RING();
-@@ -2412,7 +2415,10 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
- VB_AGE_TEST_WITH_RETURN(dev_priv);
-
-- ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
-+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
-+ ret = r600_cp_dispatch_texture(dev, file_priv, tex, &image);
-+ else
-+ ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
-
- return ret;
- }
-@@ -2495,8 +2501,9 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
- radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
- }
-
-- if (indirect->discard)
-+ if (indirect->discard) {
- radeon_cp_discard_buffer(dev, file_priv->master, buf);
-+ }
-
- COMMIT_RING();
- return 0;
-@@ -3227,7 +3234,8 @@ struct drm_ioctl_desc radeon_ioctls[] = {
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
-- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
-+ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
- };
-
- int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
-diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
-index 15c3531..acd889c 100644
---- a/drivers/gpu/drm/radeon/radeon_ttm.c
-+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
-@@ -35,11 +35,14 @@
- #include <ttm/ttm_module.h>
- #include <drm/drmP.h>
- #include <drm/radeon_drm.h>
-+#include <linux/seq_file.h>
- #include "radeon_reg.h"
- #include "radeon.h"
-
- #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
-+static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
-+
- static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
- {
- struct radeon_mman *mman;
-@@ -77,9 +80,25 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
- global_ref->release = &radeon_ttm_mem_global_release;
- r = ttm_global_item_ref(global_ref);
- if (r != 0) {
-- DRM_ERROR("Failed referencing a global TTM memory object.\n");
-+ DRM_ERROR("Failed setting up TTM memory accounting "
-+ "subsystem.\n");
-+ return r;
-+ }
-+
-+ rdev->mman.bo_global_ref.mem_glob =
-+ rdev->mman.mem_global_ref.object;
-+ global_ref = &rdev->mman.bo_global_ref.ref;
-+ global_ref->global_type = TTM_GLOBAL_TTM_BO;
-+ global_ref->size = sizeof(struct ttm_bo_global);
-+ global_ref->init = &ttm_bo_global_init;
-+ global_ref->release = &ttm_bo_global_release;
-+ r = ttm_global_item_ref(global_ref);
-+ if (r != 0) {
-+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-+ ttm_global_item_unref(&rdev->mman.mem_global_ref);
- return r;
- }
-+
- rdev->mman.mem_global_referenced = true;
- return 0;
- }
-@@ -87,6 +106,7 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
- static void radeon_ttm_global_fini(struct radeon_device *rdev)
- {
- if (rdev->mman.mem_global_referenced) {
-+ ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
- ttm_global_item_unref(&rdev->mman.mem_global_ref);
- rdev->mman.mem_global_referenced = false;
- }
-@@ -286,9 +306,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
- out_cleanup:
- if (tmp_mem.mm_node) {
-- spin_lock(&rdev->mman.bdev.lru_lock);
-+ struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-+
-+ spin_lock(&glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
-- spin_unlock(&rdev->mman.bdev.lru_lock);
-+ spin_unlock(&glob->lru_lock);
- return r;
- }
- return r;
-@@ -323,9 +345,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- }
- out_cleanup:
- if (tmp_mem.mm_node) {
-- spin_lock(&rdev->mman.bdev.lru_lock);
-+ struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-+
-+ spin_lock(&glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
-- spin_unlock(&rdev->mman.bdev.lru_lock);
-+ spin_unlock(&glob->lru_lock);
- return r;
- }
- return r;
-@@ -352,9 +376,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
- radeon_move_null(bo, new_mem);
- return 0;
- }
-- if (!rdev->cp.ready) {
-+ if (!rdev->cp.ready || rdev->asic->copy == NULL) {
- /* use memcpy */
-- DRM_ERROR("CP is not ready use memcpy.\n");
- goto memcpy;
- }
-
-@@ -446,7 +469,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
- }
- /* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&rdev->mman.bdev,
-- rdev->mman.mem_global_ref.object,
-+ rdev->mman.bo_global_ref.ref.object,
- &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
- rdev->need_dma32);
- if (r) {
-@@ -471,7 +494,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
- return r;
- }
- DRM_INFO("radeon: %uM of VRAM memory ready\n",
-- rdev->mc.real_vram_size / (1024 * 1024));
-+ (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
- ((rdev->mc.gtt_size) >> PAGE_SHIFT));
- if (r) {
-@@ -479,10 +502,16 @@ int radeon_ttm_init(struct radeon_device *rdev)
- return r;
- }
- DRM_INFO("radeon: %uM of GTT memory ready.\n",
-- rdev->mc.gtt_size / (1024 * 1024));
-+ (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
- if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
- }
-+
-+ r = radeon_ttm_debugfs_init(rdev);
-+ if (r) {
-+ DRM_ERROR("Failed to init debugfs\n");
-+ return r;
-+ }
- return 0;
- }
-
-@@ -657,3 +686,50 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
- gtt->bound = false;
- return &gtt->backend;
- }
-+
-+#define RADEON_DEBUGFS_MEM_TYPES 2
-+
-+static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
-+static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
-+
-+#if defined(CONFIG_DEBUG_FS)
-+static int radeon_mm_dump_table(struct seq_file *m, void *data)
-+{
-+ struct drm_info_node *node = (struct drm_info_node *)m->private;
-+ struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
-+ struct drm_device *dev = node->minor->dev;
-+ struct radeon_device *rdev = dev->dev_private;
-+ int ret;
-+ struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-+
-+ spin_lock(&glob->lru_lock);
-+ ret = drm_mm_dump_table(m, mm);
-+ spin_unlock(&glob->lru_lock);
-+ return ret;
-+}
-+#endif
-+
-+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
-+{
-+ unsigned i;
-+
-+#if defined(CONFIG_DEBUG_FS)
-+ for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
-+ if (i == 0)
-+ sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
-+ else
-+ sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
-+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-+ radeon_mem_types_list[i].show = &radeon_mm_dump_table;
-+ radeon_mem_types_list[i].driver_features = 0;
-+ if (i == 0)
-+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
-+ else
-+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
-+
-+ }
-+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
-+
-+#endif
-+ return 0;
-+}
-diff --git a/drivers/gpu/drm/radeon/reg_srcs/r100 b/drivers/gpu/drm/radeon/reg_srcs/r100
-new file mode 100644
-index 0000000..f7ee062
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/reg_srcs/r100
-@@ -0,0 +1,105 @@
-+r100 0x3294
-+0x1434 SRC_Y_X
-+0x1438 DST_Y_X
-+0x143C DST_HEIGHT_WIDTH
-+0x146C DP_GUI_MASTER_CNTL
-+0x1474 BRUSH_Y_X
-+0x1478 DP_BRUSH_BKGD_CLR
-+0x147C DP_BRUSH_FRGD_CLR
-+0x1480 BRUSH_DATA0
-+0x1484 BRUSH_DATA1
-+0x1598 DST_WIDTH_HEIGHT
-+0x15C0 CLR_CMP_CNTL
-+0x15C4 CLR_CMP_CLR_SRC
-+0x15C8 CLR_CMP_CLR_DST
-+0x15CC CLR_CMP_MSK
-+0x15D8 DP_SRC_FRGD_CLR
-+0x15DC DP_SRC_BKGD_CLR
-+0x1600 DST_LINE_START
-+0x1604 DST_LINE_END
-+0x1608 DST_LINE_PATCOUNT
-+0x16C0 DP_CNTL
-+0x16CC DP_WRITE_MSK
-+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
-+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
-+0x16EC SC_TOP_LEFT
-+0x16F0 SC_BOTTOM_RIGHT
-+0x16F4 SRC_SC_BOTTOM_RIGHT
-+0x1714 DSTCACHE_CTLSTAT
-+0x1720 WAIT_UNTIL
-+0x172C RBBM_GUICNTL
-+0x1810 FOG_3D_TABLE_START
-+0x1814 FOG_3D_TABLE_END
-+0x1a14 FOG_TABLE_INDEX
-+0x1a18 FOG_TABLE_DATA
-+0x1c14 PP_MISC
-+0x1c18 PP_FOG_COLOR
-+0x1c1c RE_SOLID_COLOR
-+0x1c20 RB3D_BLENDCNTL
-+0x1c4c SE_CNTL
-+0x1c50 SE_COORD_FMT
-+0x1c60 PP_TXCBLEND_0
-+0x1c64 PP_TXABLEND_0
-+0x1c68 PP_TFACTOR_0
-+0x1c78 PP_TXCBLEND_1
-+0x1c7c PP_TXABLEND_1
-+0x1c80 PP_TFACTOR_1
-+0x1c90 PP_TXCBLEND_2
-+0x1c94 PP_TXABLEND_2
-+0x1c98 PP_TFACTOR_2
-+0x1cc8 RE_STIPPLE_ADDR
-+0x1ccc RE_STIPPLE_DATA
-+0x1cd0 RE_LINE_PATTERN
-+0x1cd4 RE_LINE_STATE
-+0x1d40 PP_BORDER_COLOR0
-+0x1d44 PP_BORDER_COLOR1
-+0x1d48 PP_BORDER_COLOR2
-+0x1d7c RB3D_STENCILREFMASK
-+0x1d80 RB3D_ROPCNTL
-+0x1d84 RB3D_PLANEMASK
-+0x1d98 VAP_VPORT_XSCALE
-+0x1d9C VAP_VPORT_XOFFSET
-+0x1da0 VAP_VPORT_YSCALE
-+0x1da4 VAP_VPORT_YOFFSET
-+0x1da8 VAP_VPORT_ZSCALE
-+0x1dac VAP_VPORT_ZOFFSET
-+0x1db0 SE_ZBIAS_FACTOR
-+0x1db4 SE_ZBIAS_CONSTANT
-+0x1db8 SE_LINE_WIDTH
-+0x2140 SE_CNTL_STATUS
-+0x2200 SE_TCL_VECTOR_INDX_REG
-+0x2204 SE_TCL_VECTOR_DATA_REG
-+0x2208 SE_TCL_SCALAR_INDX_REG
-+0x220c SE_TCL_SCALAR_DATA_REG
-+0x2210 SE_TCL_MATERIAL_EMISSIVE_RED
-+0x2214 SE_TCL_MATERIAL_EMISSIVE_GREEN
-+0x2218 SE_TCL_MATERIAL_EMISSIVE_BLUE
-+0x221c SE_TCL_MATERIAL_EMISSIVE_ALPHA
-+0x2220 SE_TCL_MATERIAL_AMBIENT_RED
-+0x2224 SE_TCL_MATERIAL_AMBIENT_GREEN
-+0x2228 SE_TCL_MATERIAL_AMBIENT_BLUE
-+0x222c SE_TCL_MATERIAL_AMBIENT_ALPHA
-+0x2230 SE_TCL_MATERIAL_DIFFUSE_RED
-+0x2234 SE_TCL_MATERIAL_DIFFUSE_GREEN
-+0x2238 SE_TCL_MATERIAL_DIFFUSE_BLUE
-+0x223c SE_TCL_MATERIAL_DIFFUSE_ALPHA
-+0x2240 SE_TCL_MATERIAL_SPECULAR_RED
-+0x2244 SE_TCL_MATERIAL_SPECULAR_GREEN
-+0x2248 SE_TCL_MATERIAL_SPECULAR_BLUE
-+0x224c SE_TCL_MATERIAL_SPECULAR_ALPHA
-+0x2250 SE_TCL_SHININESS
-+0x2254 SE_TCL_OUTPUT_VTX_FMT
-+0x2258 SE_TCL_OUTPUT_VTX_SEL
-+0x225c SE_TCL_MATRIX_SELECT_0
-+0x2260 SE_TCL_MATRIX_SELECT_1
-+0x2264 SE_TCL_UCP_VERT_BLEND_CNTL
-+0x2268 SE_TCL_TEXTURE_PROC_CTL
-+0x226c SE_TCL_LIGHT_MODEL_CTL
-+0x2270 SE_TCL_PER_LIGHT_CTL_0
-+0x2274 SE_TCL_PER_LIGHT_CTL_1
-+0x2278 SE_TCL_PER_LIGHT_CTL_2
-+0x227c SE_TCL_PER_LIGHT_CTL_3
-+0x2284 SE_TCL_STATE_FLUSH
-+0x26c0 RE_TOP_LEFT
-+0x26c4 RE_MISC
-+0x3290 RB3D_ZPASS_DATA
-diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
-new file mode 100644
-index 0000000..6021c88
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
-@@ -0,0 +1,184 @@
-+r200 0x3294
-+0x1434 SRC_Y_X
-+0x1438 DST_Y_X
-+0x143C DST_HEIGHT_WIDTH
-+0x146C DP_GUI_MASTER_CNTL
-+0x1474 BRUSH_Y_X
-+0x1478 DP_BRUSH_BKGD_CLR
-+0x147C DP_BRUSH_FRGD_CLR
-+0x1480 BRUSH_DATA0
-+0x1484 BRUSH_DATA1
-+0x1598 DST_WIDTH_HEIGHT
-+0x15C0 CLR_CMP_CNTL
-+0x15C4 CLR_CMP_CLR_SRC
-+0x15C8 CLR_CMP_CLR_DST
-+0x15CC CLR_CMP_MSK
-+0x15D8 DP_SRC_FRGD_CLR
-+0x15DC DP_SRC_BKGD_CLR
-+0x1600 DST_LINE_START
-+0x1604 DST_LINE_END
-+0x1608 DST_LINE_PATCOUNT
-+0x16C0 DP_CNTL
-+0x16CC DP_WRITE_MSK
-+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
-+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
-+0x16EC SC_TOP_LEFT
-+0x16F0 SC_BOTTOM_RIGHT
-+0x16F4 SRC_SC_BOTTOM_RIGHT
-+0x1714 DSTCACHE_CTLSTAT
-+0x1720 WAIT_UNTIL
-+0x172C RBBM_GUICNTL
-+0x1c14 PP_MISC
-+0x1c18 PP_FOG_COLOR
-+0x1c1c RE_SOLID_COLOR
-+0x1c20 RB3D_BLENDCNTL
-+0x1c4c SE_CNTL
-+0x1c50 RE_CNTL
-+0x1cc8 RE_STIPPLE_ADDR
-+0x1ccc RE_STIPPLE_DATA
-+0x1cd0 RE_LINE_PATTERN
-+0x1cd4 RE_LINE_STATE
-+0x1cd8 RE_SCISSOR_TL_0
-+0x1cdc RE_SCISSOR_BR_0
-+0x1ce0 RE_SCISSOR_TL_1
-+0x1ce4 RE_SCISSOR_BR_1
-+0x1ce8 RE_SCISSOR_TL_2
-+0x1cec RE_SCISSOR_BR_2
-+0x1d60 RB3D_DEPTHXY_OFFSET
-+0x1d7c RB3D_STENCILREFMASK
-+0x1d80 RB3D_ROPCNTL
-+0x1d84 RB3D_PLANEMASK
-+0x1d98 VAP_VPORT_XSCALE
-+0x1d9c VAP_VPORT_XOFFSET
-+0x1da0 VAP_VPORT_YSCALE
-+0x1da4 VAP_VPORT_YOFFSET
-+0x1da8 VAP_VPORT_ZSCALE
-+0x1dac VAP_VPORT_ZOFFSET
-+0x1db0 SE_ZBIAS_FACTOR
-+0x1db4 SE_ZBIAS_CONSTANT
-+0x1db8 SE_LINE_WIDTH
-+0x2080 SE_VAP_CNTL
-+0x2090 SE_TCL_OUTPUT_VTX_FMT_0
-+0x2094 SE_TCL_OUTPUT_VTX_FMT_1
-+0x20b0 SE_VTE_CNTL
-+0x2140 SE_CNTL_STATUS
-+0x2180 SE_VTX_STATE_CNTL
-+0x2200 SE_TCL_VECTOR_INDX_REG
-+0x2204 SE_TCL_VECTOR_DATA_REG
-+0x2208 SE_TCL_SCALAR_INDX_REG
-+0x220c SE_TCL_SCALAR_DATA_REG
-+0x2230 SE_TCL_MATRIX_SEL_0
-+0x2234 SE_TCL_MATRIX_SEL_1
-+0x2238 SE_TCL_MATRIX_SEL_2
-+0x223c SE_TCL_MATRIX_SEL_3
-+0x2240 SE_TCL_MATRIX_SEL_4
-+0x2250 SE_TCL_OUTPUT_VTX_COMP_SEL
-+0x2254 SE_TCL_INPUT_VTX_VECTOR_ADDR_0
-+0x2258 SE_TCL_INPUT_VTX_VECTOR_ADDR_1
-+0x225c SE_TCL_INPUT_VTX_VECTOR_ADDR_2
-+0x2260 SE_TCL_INPUT_VTX_VECTOR_ADDR_3
-+0x2268 SE_TCL_LIGHT_MODEL_CTL_0
-+0x226c SE_TCL_LIGHT_MODEL_CTL_1
-+0x2270 SE_TCL_PER_LIGHT_CTL_0
-+0x2274 SE_TCL_PER_LIGHT_CTL_1
-+0x2278 SE_TCL_PER_LIGHT_CTL_2
-+0x227c SE_TCL_PER_LIGHT_CTL_3
-+0x2284 VAP_PVS_STATE_FLUSH_REG
-+0x22a8 SE_TCL_TEX_PROC_CTL_2
-+0x22ac SE_TCL_TEX_PROC_CTL_3
-+0x22b0 SE_TCL_TEX_PROC_CTL_0
-+0x22b4 SE_TCL_TEX_PROC_CTL_1
-+0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
-+0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
-+0x22c4 SE_TCL_POINT_SPRITE_CNTL
-+0x2648 RE_POINTSIZE
-+0x26c0 RE_TOP_LEFT
-+0x26c4 RE_MISC
-+0x26f0 RE_AUX_SCISSOR_CNTL
-+0x2c14 PP_BORDER_COLOR_0
-+0x2c34 PP_BORDER_COLOR_1
-+0x2c54 PP_BORDER_COLOR_2
-+0x2c74 PP_BORDER_COLOR_3
-+0x2c94 PP_BORDER_COLOR_4
-+0x2cb4 PP_BORDER_COLOR_5
-+0x2cc4 PP_CNTL_X
-+0x2cf8 PP_TRI_PERF
-+0x2cfc PP_PERF_CNTL
-+0x2d9c PP_TAM_DEBUG3
-+0x2ee0 PP_TFACTOR_0
-+0x2ee4 PP_TFACTOR_1
-+0x2ee8 PP_TFACTOR_2
-+0x2eec PP_TFACTOR_3
-+0x2ef0 PP_TFACTOR_4
-+0x2ef4 PP_TFACTOR_5
-+0x2ef8 PP_TFACTOR_6
-+0x2efc PP_TFACTOR_7
-+0x2f00 PP_TXCBLEND_0
-+0x2f04 PP_TXCBLEND2_0
-+0x2f08 PP_TXABLEND_0
-+0x2f0c PP_TXABLEND2_0
-+0x2f10 PP_TXCBLEND_1
-+0x2f14 PP_TXCBLEND2_1
-+0x2f18 PP_TXABLEND_1
-+0x2f1c PP_TXABLEND2_1
-+0x2f20 PP_TXCBLEND_2
-+0x2f24 PP_TXCBLEND2_2
-+0x2f28 PP_TXABLEND_2
-+0x2f2c PP_TXABLEND2_2
-+0x2f30 PP_TXCBLEND_3
-+0x2f34 PP_TXCBLEND2_3
-+0x2f38 PP_TXABLEND_3
-+0x2f3c PP_TXABLEND2_3
-+0x2f40 PP_TXCBLEND_4
-+0x2f44 PP_TXCBLEND2_4
-+0x2f48 PP_TXABLEND_4
-+0x2f4c PP_TXABLEND2_4
-+0x2f50 PP_TXCBLEND_5
-+0x2f54 PP_TXCBLEND2_5
-+0x2f58 PP_TXABLEND_5
-+0x2f5c PP_TXABLEND2_5
-+0x2f60 PP_TXCBLEND_6
-+0x2f64 PP_TXCBLEND2_6
-+0x2f68 PP_TXABLEND_6
-+0x2f6c PP_TXABLEND2_6
-+0x2f70 PP_TXCBLEND_7
-+0x2f74 PP_TXCBLEND2_7
-+0x2f78 PP_TXABLEND_7
-+0x2f7c PP_TXABLEND2_7
-+0x2f80 PP_TXCBLEND_8
-+0x2f84 PP_TXCBLEND2_8
-+0x2f88 PP_TXABLEND_8
-+0x2f8c PP_TXABLEND2_8
-+0x2f90 PP_TXCBLEND_9
-+0x2f94 PP_TXCBLEND2_9
-+0x2f98 PP_TXABLEND_9
-+0x2f9c PP_TXABLEND2_9
-+0x2fa0 PP_TXCBLEND_10
-+0x2fa4 PP_TXCBLEND2_10
-+0x2fa8 PP_TXABLEND_10
-+0x2fac PP_TXABLEND2_10
-+0x2fb0 PP_TXCBLEND_11
-+0x2fb4 PP_TXCBLEND2_11
-+0x2fb8 PP_TXABLEND_11
-+0x2fbc PP_TXABLEND2_11
-+0x2fc0 PP_TXCBLEND_12
-+0x2fc4 PP_TXCBLEND2_12
-+0x2fc8 PP_TXABLEND_12
-+0x2fcc PP_TXABLEND2_12
-+0x2fd0 PP_TXCBLEND_13
-+0x2fd4 PP_TXCBLEND2_13
-+0x2fd8 PP_TXABLEND_13
-+0x2fdc PP_TXABLEND2_13
-+0x2fe0 PP_TXCBLEND_14
-+0x2fe4 PP_TXCBLEND2_14
-+0x2fe8 PP_TXABLEND_14
-+0x2fec PP_TXABLEND2_14
-+0x2ff0 PP_TXCBLEND_15
-+0x2ff4 PP_TXCBLEND2_15
-+0x2ff8 PP_TXABLEND_15
-+0x2ffc PP_TXABLEND2_15
-+0x3218 RB3D_BLENCOLOR
-+0x321c RB3D_ABLENDCNTL
-+0x3220 RB3D_CBLENDCNTL
-+0x3290 RB3D_ZPASS_DATA
-+
-diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
-new file mode 100644
-index 0000000..19c4663
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
-@@ -0,0 +1,729 @@
-+r300 0x4f60
-+0x1434 SRC_Y_X
-+0x1438 DST_Y_X
-+0x143C DST_HEIGHT_WIDTH
-+0x146C DP_GUI_MASTER_CNTL
-+0x1474 BRUSH_Y_X
-+0x1478 DP_BRUSH_BKGD_CLR
-+0x147C DP_BRUSH_FRGD_CLR
-+0x1480 BRUSH_DATA0
-+0x1484 BRUSH_DATA1
-+0x1598 DST_WIDTH_HEIGHT
-+0x15C0 CLR_CMP_CNTL
-+0x15C4 CLR_CMP_CLR_SRC
-+0x15C8 CLR_CMP_CLR_DST
-+0x15CC CLR_CMP_MSK
-+0x15D8 DP_SRC_FRGD_CLR
-+0x15DC DP_SRC_BKGD_CLR
-+0x1600 DST_LINE_START
-+0x1604 DST_LINE_END
-+0x1608 DST_LINE_PATCOUNT
-+0x16C0 DP_CNTL
-+0x16CC DP_WRITE_MSK
-+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
-+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
-+0x16EC SC_TOP_LEFT
-+0x16F0 SC_BOTTOM_RIGHT
-+0x16F4 SRC_SC_BOTTOM_RIGHT
-+0x1714 DSTCACHE_CTLSTAT
-+0x1720 WAIT_UNTIL
-+0x172C RBBM_GUICNTL
-+0x1D98 VAP_VPORT_XSCALE
-+0x1D9C VAP_VPORT_XOFFSET
-+0x1DA0 VAP_VPORT_YSCALE
-+0x1DA4 VAP_VPORT_YOFFSET
-+0x1DA8 VAP_VPORT_ZSCALE
-+0x1DAC VAP_VPORT_ZOFFSET
-+0x2080 VAP_CNTL
-+0x2090 VAP_OUT_VTX_FMT_0
-+0x2094 VAP_OUT_VTX_FMT_1
-+0x20B0 VAP_VTE_CNTL
-+0x2138 VAP_VF_MIN_VTX_INDX
-+0x2140 VAP_CNTL_STATUS
-+0x2150 VAP_PROG_STREAM_CNTL_0
-+0x2154 VAP_PROG_STREAM_CNTL_1
-+0x2158 VAP_PROG_STREAM_CNTL_2
-+0x215C VAP_PROG_STREAM_CNTL_3
-+0x2160 VAP_PROG_STREAM_CNTL_4
-+0x2164 VAP_PROG_STREAM_CNTL_5
-+0x2168 VAP_PROG_STREAM_CNTL_6
-+0x216C VAP_PROG_STREAM_CNTL_7
-+0x2180 VAP_VTX_STATE_CNTL
-+0x2184 VAP_VSM_VTX_ASSM
-+0x2188 VAP_VTX_STATE_IND_REG_0
-+0x218C VAP_VTX_STATE_IND_REG_1
-+0x2190 VAP_VTX_STATE_IND_REG_2
-+0x2194 VAP_VTX_STATE_IND_REG_3
-+0x2198 VAP_VTX_STATE_IND_REG_4
-+0x219C VAP_VTX_STATE_IND_REG_5
-+0x21A0 VAP_VTX_STATE_IND_REG_6
-+0x21A4 VAP_VTX_STATE_IND_REG_7
-+0x21A8 VAP_VTX_STATE_IND_REG_8
-+0x21AC VAP_VTX_STATE_IND_REG_9
-+0x21B0 VAP_VTX_STATE_IND_REG_10
-+0x21B4 VAP_VTX_STATE_IND_REG_11
-+0x21B8 VAP_VTX_STATE_IND_REG_12
-+0x21BC VAP_VTX_STATE_IND_REG_13
-+0x21C0 VAP_VTX_STATE_IND_REG_14
-+0x21C4 VAP_VTX_STATE_IND_REG_15
-+0x21DC VAP_PSC_SGN_NORM_CNTL
-+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
-+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
-+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
-+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
-+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
-+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
-+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
-+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
-+0x2200 VAP_PVS_VECTOR_INDX_REG
-+0x2204 VAP_PVS_VECTOR_DATA_REG
-+0x2208 VAP_PVS_VECTOR_DATA_REG_128
-+0x221C VAP_CLIP_CNTL
-+0x2220 VAP_GB_VERT_CLIP_ADJ
-+0x2224 VAP_GB_VERT_DISC_ADJ
-+0x2228 VAP_GB_HORZ_CLIP_ADJ
-+0x222C VAP_GB_HORZ_DISC_ADJ
-+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
-+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
-+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
-+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
-+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
-+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
-+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
-+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
-+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
-+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
-+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
-+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
-+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
-+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
-+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
-+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
-+0x2284 VAP_PVS_STATE_FLUSH_REG
-+0x2288 VAP_PVS_VTX_TIMEOUT_REG
-+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
-+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
-+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
-+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
-+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
-+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
-+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
-+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
-+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
-+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
-+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
-+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
-+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
-+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
-+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
-+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
-+0x22D0 VAP_PVS_CODE_CNTL_0
-+0x22D4 VAP_PVS_CONST_CNTL
-+0x22D8 VAP_PVS_CODE_CNTL_1
-+0x22DC VAP_PVS_FLOW_CNTL_OPC
-+0x342C RB2D_DSTCACHE_CTLSTAT
-+0x4000 GB_VAP_RASTER_VTX_FMT_0
-+0x4004 GB_VAP_RASTER_VTX_FMT_1
-+0x4008 GB_ENABLE
-+0x401C GB_SELECT
-+0x4020 GB_AA_CONFIG
-+0x4024 GB_FIFO_SIZE
-+0x4100 TX_INVALTAGS
-+0x4200 GA_POINT_S0
-+0x4204 GA_POINT_T0
-+0x4208 GA_POINT_S1
-+0x420C GA_POINT_T1
-+0x4214 GA_TRIANGLE_STIPPLE
-+0x421C GA_POINT_SIZE
-+0x4230 GA_POINT_MINMAX
-+0x4234 GA_LINE_CNTL
-+0x4238 GA_LINE_STIPPLE_CONFIG
-+0x4260 GA_LINE_STIPPLE_VALUE
-+0x4264 GA_LINE_S0
-+0x4268 GA_LINE_S1
-+0x4278 GA_COLOR_CONTROL
-+0x427C GA_SOLID_RG
-+0x4280 GA_SOLID_BA
-+0x4288 GA_POLY_MODE
-+0x428C GA_ROUND_MODE
-+0x4290 GA_OFFSET
-+0x4294 GA_FOG_SCALE
-+0x4298 GA_FOG_OFFSET
-+0x42A0 SU_TEX_WRAP
-+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
-+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
-+0x42AC SU_POLY_OFFSET_BACK_SCALE
-+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
-+0x42B4 SU_POLY_OFFSET_ENABLE
-+0x42B8 SU_CULL_MODE
-+0x42C0 SU_DEPTH_SCALE
-+0x42C4 SU_DEPTH_OFFSET
-+0x42C8 SU_REG_DEST
-+0x4300 RS_COUNT
-+0x4304 RS_INST_COUNT
-+0x4310 RS_IP_0
-+0x4314 RS_IP_1
-+0x4318 RS_IP_2
-+0x431C RS_IP_3
-+0x4320 RS_IP_4
-+0x4324 RS_IP_5
-+0x4328 RS_IP_6
-+0x432C RS_IP_7
-+0x4330 RS_INST_0
-+0x4334 RS_INST_1
-+0x4338 RS_INST_2
-+0x433C RS_INST_3
-+0x4340 RS_INST_4
-+0x4344 RS_INST_5
-+0x4348 RS_INST_6
-+0x434C RS_INST_7
-+0x4350 RS_INST_8
-+0x4354 RS_INST_9
-+0x4358 RS_INST_10
-+0x435C RS_INST_11
-+0x4360 RS_INST_12
-+0x4364 RS_INST_13
-+0x4368 RS_INST_14
-+0x436C RS_INST_15
-+0x43A4 SC_HYPERZ_EN
-+0x43A8 SC_EDGERULE
-+0x43B0 SC_CLIP_0_A
-+0x43B4 SC_CLIP_0_B
-+0x43B8 SC_CLIP_1_A
-+0x43BC SC_CLIP_1_B
-+0x43C0 SC_CLIP_2_A
-+0x43C4 SC_CLIP_2_B
-+0x43C8 SC_CLIP_3_A
-+0x43CC SC_CLIP_3_B
-+0x43D0 SC_CLIP_RULE
-+0x43E0 SC_SCISSOR0
-+0x43E8 SC_SCREENDOOR
-+0x4440 TX_FILTER1_0
-+0x4444 TX_FILTER1_1
-+0x4448 TX_FILTER1_2
-+0x444C TX_FILTER1_3
-+0x4450 TX_FILTER1_4
-+0x4454 TX_FILTER1_5
-+0x4458 TX_FILTER1_6
-+0x445C TX_FILTER1_7
-+0x4460 TX_FILTER1_8
-+0x4464 TX_FILTER1_9
-+0x4468 TX_FILTER1_10
-+0x446C TX_FILTER1_11
-+0x4470 TX_FILTER1_12
-+0x4474 TX_FILTER1_13
-+0x4478 TX_FILTER1_14
-+0x447C TX_FILTER1_15
-+0x4580 TX_CHROMA_KEY_0
-+0x4584 TX_CHROMA_KEY_1
-+0x4588 TX_CHROMA_KEY_2
-+0x458C TX_CHROMA_KEY_3
-+0x4590 TX_CHROMA_KEY_4
-+0x4594 TX_CHROMA_KEY_5
-+0x4598 TX_CHROMA_KEY_6
-+0x459C TX_CHROMA_KEY_7
-+0x45A0 TX_CHROMA_KEY_8
-+0x45A4 TX_CHROMA_KEY_9
-+0x45A8 TX_CHROMA_KEY_10
-+0x45AC TX_CHROMA_KEY_11
-+0x45B0 TX_CHROMA_KEY_12
-+0x45B4 TX_CHROMA_KEY_13
-+0x45B8 TX_CHROMA_KEY_14
-+0x45BC TX_CHROMA_KEY_15
-+0x45C0 TX_BORDER_COLOR_0
-+0x45C4 TX_BORDER_COLOR_1
-+0x45C8 TX_BORDER_COLOR_2
-+0x45CC TX_BORDER_COLOR_3
-+0x45D0 TX_BORDER_COLOR_4
-+0x45D4 TX_BORDER_COLOR_5
-+0x45D8 TX_BORDER_COLOR_6
-+0x45DC TX_BORDER_COLOR_7
-+0x45E0 TX_BORDER_COLOR_8
-+0x45E4 TX_BORDER_COLOR_9
-+0x45E8 TX_BORDER_COLOR_10
-+0x45EC TX_BORDER_COLOR_11
-+0x45F0 TX_BORDER_COLOR_12
-+0x45F4 TX_BORDER_COLOR_13
-+0x45F8 TX_BORDER_COLOR_14
-+0x45FC TX_BORDER_COLOR_15
-+0x4600 US_CONFIG
-+0x4604 US_PIXSIZE
-+0x4608 US_CODE_OFFSET
-+0x460C US_RESET
-+0x4610 US_CODE_ADDR_0
-+0x4614 US_CODE_ADDR_1
-+0x4618 US_CODE_ADDR_2
-+0x461C US_CODE_ADDR_3
-+0x4620 US_TEX_INST_0
-+0x4624 US_TEX_INST_1
-+0x4628 US_TEX_INST_2
-+0x462C US_TEX_INST_3
-+0x4630 US_TEX_INST_4
-+0x4634 US_TEX_INST_5
-+0x4638 US_TEX_INST_6
-+0x463C US_TEX_INST_7
-+0x4640 US_TEX_INST_8
-+0x4644 US_TEX_INST_9
-+0x4648 US_TEX_INST_10
-+0x464C US_TEX_INST_11
-+0x4650 US_TEX_INST_12
-+0x4654 US_TEX_INST_13
-+0x4658 US_TEX_INST_14
-+0x465C US_TEX_INST_15
-+0x4660 US_TEX_INST_16
-+0x4664 US_TEX_INST_17
-+0x4668 US_TEX_INST_18
-+0x466C US_TEX_INST_19
-+0x4670 US_TEX_INST_20
-+0x4674 US_TEX_INST_21
-+0x4678 US_TEX_INST_22
-+0x467C US_TEX_INST_23
-+0x4680 US_TEX_INST_24
-+0x4684 US_TEX_INST_25
-+0x4688 US_TEX_INST_26
-+0x468C US_TEX_INST_27
-+0x4690 US_TEX_INST_28
-+0x4694 US_TEX_INST_29
-+0x4698 US_TEX_INST_30
-+0x469C US_TEX_INST_31
-+0x46A4 US_OUT_FMT_0
-+0x46A8 US_OUT_FMT_1
-+0x46AC US_OUT_FMT_2
-+0x46B0 US_OUT_FMT_3
-+0x46B4 US_W_FMT
-+0x46C0 US_ALU_RGB_ADDR_0
-+0x46C4 US_ALU_RGB_ADDR_1
-+0x46C8 US_ALU_RGB_ADDR_2
-+0x46CC US_ALU_RGB_ADDR_3
-+0x46D0 US_ALU_RGB_ADDR_4
-+0x46D4 US_ALU_RGB_ADDR_5
-+0x46D8 US_ALU_RGB_ADDR_6
-+0x46DC US_ALU_RGB_ADDR_7
-+0x46E0 US_ALU_RGB_ADDR_8
-+0x46E4 US_ALU_RGB_ADDR_9
-+0x46E8 US_ALU_RGB_ADDR_10
-+0x46EC US_ALU_RGB_ADDR_11
-+0x46F0 US_ALU_RGB_ADDR_12
-+0x46F4 US_ALU_RGB_ADDR_13
-+0x46F8 US_ALU_RGB_ADDR_14
-+0x46FC US_ALU_RGB_ADDR_15
-+0x4700 US_ALU_RGB_ADDR_16
-+0x4704 US_ALU_RGB_ADDR_17
-+0x4708 US_ALU_RGB_ADDR_18
-+0x470C US_ALU_RGB_ADDR_19
-+0x4710 US_ALU_RGB_ADDR_20
-+0x4714 US_ALU_RGB_ADDR_21
-+0x4718 US_ALU_RGB_ADDR_22
-+0x471C US_ALU_RGB_ADDR_23
-+0x4720 US_ALU_RGB_ADDR_24
-+0x4724 US_ALU_RGB_ADDR_25
-+0x4728 US_ALU_RGB_ADDR_26
-+0x472C US_ALU_RGB_ADDR_27
-+0x4730 US_ALU_RGB_ADDR_28
-+0x4734 US_ALU_RGB_ADDR_29
-+0x4738 US_ALU_RGB_ADDR_30
-+0x473C US_ALU_RGB_ADDR_31
-+0x4740 US_ALU_RGB_ADDR_32
-+0x4744 US_ALU_RGB_ADDR_33
-+0x4748 US_ALU_RGB_ADDR_34
-+0x474C US_ALU_RGB_ADDR_35
-+0x4750 US_ALU_RGB_ADDR_36
-+0x4754 US_ALU_RGB_ADDR_37
-+0x4758 US_ALU_RGB_ADDR_38
-+0x475C US_ALU_RGB_ADDR_39
-+0x4760 US_ALU_RGB_ADDR_40
-+0x4764 US_ALU_RGB_ADDR_41
-+0x4768 US_ALU_RGB_ADDR_42
-+0x476C US_ALU_RGB_ADDR_43
-+0x4770 US_ALU_RGB_ADDR_44
-+0x4774 US_ALU_RGB_ADDR_45
-+0x4778 US_ALU_RGB_ADDR_46
-+0x477C US_ALU_RGB_ADDR_47
-+0x4780 US_ALU_RGB_ADDR_48
-+0x4784 US_ALU_RGB_ADDR_49
-+0x4788 US_ALU_RGB_ADDR_50
-+0x478C US_ALU_RGB_ADDR_51
-+0x4790 US_ALU_RGB_ADDR_52
-+0x4794 US_ALU_RGB_ADDR_53
-+0x4798 US_ALU_RGB_ADDR_54
-+0x479C US_ALU_RGB_ADDR_55
-+0x47A0 US_ALU_RGB_ADDR_56
-+0x47A4 US_ALU_RGB_ADDR_57
-+0x47A8 US_ALU_RGB_ADDR_58
-+0x47AC US_ALU_RGB_ADDR_59
-+0x47B0 US_ALU_RGB_ADDR_60
-+0x47B4 US_ALU_RGB_ADDR_61
-+0x47B8 US_ALU_RGB_ADDR_62
-+0x47BC US_ALU_RGB_ADDR_63
-+0x47C0 US_ALU_ALPHA_ADDR_0
-+0x47C4 US_ALU_ALPHA_ADDR_1
-+0x47C8 US_ALU_ALPHA_ADDR_2
-+0x47CC US_ALU_ALPHA_ADDR_3
-+0x47D0 US_ALU_ALPHA_ADDR_4
-+0x47D4 US_ALU_ALPHA_ADDR_5
-+0x47D8 US_ALU_ALPHA_ADDR_6
-+0x47DC US_ALU_ALPHA_ADDR_7
-+0x47E0 US_ALU_ALPHA_ADDR_8
-+0x47E4 US_ALU_ALPHA_ADDR_9
-+0x47E8 US_ALU_ALPHA_ADDR_10
-+0x47EC US_ALU_ALPHA_ADDR_11
-+0x47F0 US_ALU_ALPHA_ADDR_12
-+0x47F4 US_ALU_ALPHA_ADDR_13
-+0x47F8 US_ALU_ALPHA_ADDR_14
-+0x47FC US_ALU_ALPHA_ADDR_15
-+0x4800 US_ALU_ALPHA_ADDR_16
-+0x4804 US_ALU_ALPHA_ADDR_17
-+0x4808 US_ALU_ALPHA_ADDR_18
-+0x480C US_ALU_ALPHA_ADDR_19
-+0x4810 US_ALU_ALPHA_ADDR_20
-+0x4814 US_ALU_ALPHA_ADDR_21
-+0x4818 US_ALU_ALPHA_ADDR_22
-+0x481C US_ALU_ALPHA_ADDR_23
-+0x4820 US_ALU_ALPHA_ADDR_24
-+0x4824 US_ALU_ALPHA_ADDR_25
-+0x4828 US_ALU_ALPHA_ADDR_26
-+0x482C US_ALU_ALPHA_ADDR_27
-+0x4830 US_ALU_ALPHA_ADDR_28
-+0x4834 US_ALU_ALPHA_ADDR_29
-+0x4838 US_ALU_ALPHA_ADDR_30
-+0x483C US_ALU_ALPHA_ADDR_31
-+0x4840 US_ALU_ALPHA_ADDR_32
-+0x4844 US_ALU_ALPHA_ADDR_33
-+0x4848 US_ALU_ALPHA_ADDR_34
-+0x484C US_ALU_ALPHA_ADDR_35
-+0x4850 US_ALU_ALPHA_ADDR_36
-+0x4854 US_ALU_ALPHA_ADDR_37
-+0x4858 US_ALU_ALPHA_ADDR_38
-+0x485C US_ALU_ALPHA_ADDR_39
-+0x4860 US_ALU_ALPHA_ADDR_40
-+0x4864 US_ALU_ALPHA_ADDR_41
-+0x4868 US_ALU_ALPHA_ADDR_42
-+0x486C US_ALU_ALPHA_ADDR_43
-+0x4870 US_ALU_ALPHA_ADDR_44
-+0x4874 US_ALU_ALPHA_ADDR_45
-+0x4878 US_ALU_ALPHA_ADDR_46
-+0x487C US_ALU_ALPHA_ADDR_47
-+0x4880 US_ALU_ALPHA_ADDR_48
-+0x4884 US_ALU_ALPHA_ADDR_49
-+0x4888 US_ALU_ALPHA_ADDR_50
-+0x488C US_ALU_ALPHA_ADDR_51
-+0x4890 US_ALU_ALPHA_ADDR_52
-+0x4894 US_ALU_ALPHA_ADDR_53
-+0x4898 US_ALU_ALPHA_ADDR_54
-+0x489C US_ALU_ALPHA_ADDR_55
-+0x48A0 US_ALU_ALPHA_ADDR_56
-+0x48A4 US_ALU_ALPHA_ADDR_57
-+0x48A8 US_ALU_ALPHA_ADDR_58
-+0x48AC US_ALU_ALPHA_ADDR_59
-+0x48B0 US_ALU_ALPHA_ADDR_60
-+0x48B4 US_ALU_ALPHA_ADDR_61
-+0x48B8 US_ALU_ALPHA_ADDR_62
-+0x48BC US_ALU_ALPHA_ADDR_63
-+0x48C0 US_ALU_RGB_INST_0
-+0x48C4 US_ALU_RGB_INST_1
-+0x48C8 US_ALU_RGB_INST_2
-+0x48CC US_ALU_RGB_INST_3
-+0x48D0 US_ALU_RGB_INST_4
-+0x48D4 US_ALU_RGB_INST_5
-+0x48D8 US_ALU_RGB_INST_6
-+0x48DC US_ALU_RGB_INST_7
-+0x48E0 US_ALU_RGB_INST_8
-+0x48E4 US_ALU_RGB_INST_9
-+0x48E8 US_ALU_RGB_INST_10
-+0x48EC US_ALU_RGB_INST_11
-+0x48F0 US_ALU_RGB_INST_12
-+0x48F4 US_ALU_RGB_INST_13
-+0x48F8 US_ALU_RGB_INST_14
-+0x48FC US_ALU_RGB_INST_15
-+0x4900 US_ALU_RGB_INST_16
-+0x4904 US_ALU_RGB_INST_17
-+0x4908 US_ALU_RGB_INST_18
-+0x490C US_ALU_RGB_INST_19
-+0x4910 US_ALU_RGB_INST_20
-+0x4914 US_ALU_RGB_INST_21
-+0x4918 US_ALU_RGB_INST_22
-+0x491C US_ALU_RGB_INST_23
-+0x4920 US_ALU_RGB_INST_24
-+0x4924 US_ALU_RGB_INST_25
-+0x4928 US_ALU_RGB_INST_26
-+0x492C US_ALU_RGB_INST_27
-+0x4930 US_ALU_RGB_INST_28
-+0x4934 US_ALU_RGB_INST_29
-+0x4938 US_ALU_RGB_INST_30
-+0x493C US_ALU_RGB_INST_31
-+0x4940 US_ALU_RGB_INST_32
-+0x4944 US_ALU_RGB_INST_33
-+0x4948 US_ALU_RGB_INST_34
-+0x494C US_ALU_RGB_INST_35
-+0x4950 US_ALU_RGB_INST_36
-+0x4954 US_ALU_RGB_INST_37
-+0x4958 US_ALU_RGB_INST_38
-+0x495C US_ALU_RGB_INST_39
-+0x4960 US_ALU_RGB_INST_40
-+0x4964 US_ALU_RGB_INST_41
-+0x4968 US_ALU_RGB_INST_42
-+0x496C US_ALU_RGB_INST_43
-+0x4970 US_ALU_RGB_INST_44
-+0x4974 US_ALU_RGB_INST_45
-+0x4978 US_ALU_RGB_INST_46
-+0x497C US_ALU_RGB_INST_47
-+0x4980 US_ALU_RGB_INST_48
-+0x4984 US_ALU_RGB_INST_49
-+0x4988 US_ALU_RGB_INST_50
-+0x498C US_ALU_RGB_INST_51
-+0x4990 US_ALU_RGB_INST_52
-+0x4994 US_ALU_RGB_INST_53
-+0x4998 US_ALU_RGB_INST_54
-+0x499C US_ALU_RGB_INST_55
-+0x49A0 US_ALU_RGB_INST_56
-+0x49A4 US_ALU_RGB_INST_57
-+0x49A8 US_ALU_RGB_INST_58
-+0x49AC US_ALU_RGB_INST_59
-+0x49B0 US_ALU_RGB_INST_60
-+0x49B4 US_ALU_RGB_INST_61
-+0x49B8 US_ALU_RGB_INST_62
-+0x49BC US_ALU_RGB_INST_63
-+0x49C0 US_ALU_ALPHA_INST_0
-+0x49C4 US_ALU_ALPHA_INST_1
-+0x49C8 US_ALU_ALPHA_INST_2
-+0x49CC US_ALU_ALPHA_INST_3
-+0x49D0 US_ALU_ALPHA_INST_4
-+0x49D4 US_ALU_ALPHA_INST_5
-+0x49D8 US_ALU_ALPHA_INST_6
-+0x49DC US_ALU_ALPHA_INST_7
-+0x49E0 US_ALU_ALPHA_INST_8
-+0x49E4 US_ALU_ALPHA_INST_9
-+0x49E8 US_ALU_ALPHA_INST_10
-+0x49EC US_ALU_ALPHA_INST_11
-+0x49F0 US_ALU_ALPHA_INST_12
-+0x49F4 US_ALU_ALPHA_INST_13
-+0x49F8 US_ALU_ALPHA_INST_14
-+0x49FC US_ALU_ALPHA_INST_15
-+0x4A00 US_ALU_ALPHA_INST_16
-+0x4A04 US_ALU_ALPHA_INST_17
-+0x4A08 US_ALU_ALPHA_INST_18
-+0x4A0C US_ALU_ALPHA_INST_19
-+0x4A10 US_ALU_ALPHA_INST_20
-+0x4A14 US_ALU_ALPHA_INST_21
-+0x4A18 US_ALU_ALPHA_INST_22
-+0x4A1C US_ALU_ALPHA_INST_23
-+0x4A20 US_ALU_ALPHA_INST_24
-+0x4A24 US_ALU_ALPHA_INST_25
-+0x4A28 US_ALU_ALPHA_INST_26
-+0x4A2C US_ALU_ALPHA_INST_27
-+0x4A30 US_ALU_ALPHA_INST_28
-+0x4A34 US_ALU_ALPHA_INST_29
-+0x4A38 US_ALU_ALPHA_INST_30
-+0x4A3C US_ALU_ALPHA_INST_31
-+0x4A40 US_ALU_ALPHA_INST_32
-+0x4A44 US_ALU_ALPHA_INST_33
-+0x4A48 US_ALU_ALPHA_INST_34
-+0x4A4C US_ALU_ALPHA_INST_35
-+0x4A50 US_ALU_ALPHA_INST_36
-+0x4A54 US_ALU_ALPHA_INST_37
-+0x4A58 US_ALU_ALPHA_INST_38
-+0x4A5C US_ALU_ALPHA_INST_39
-+0x4A60 US_ALU_ALPHA_INST_40
-+0x4A64 US_ALU_ALPHA_INST_41
-+0x4A68 US_ALU_ALPHA_INST_42
-+0x4A6C US_ALU_ALPHA_INST_43
-+0x4A70 US_ALU_ALPHA_INST_44
-+0x4A74 US_ALU_ALPHA_INST_45
-+0x4A78 US_ALU_ALPHA_INST_46
-+0x4A7C US_ALU_ALPHA_INST_47
-+0x4A80 US_ALU_ALPHA_INST_48
-+0x4A84 US_ALU_ALPHA_INST_49
-+0x4A88 US_ALU_ALPHA_INST_50
-+0x4A8C US_ALU_ALPHA_INST_51
-+0x4A90 US_ALU_ALPHA_INST_52
-+0x4A94 US_ALU_ALPHA_INST_53
-+0x4A98 US_ALU_ALPHA_INST_54
-+0x4A9C US_ALU_ALPHA_INST_55
-+0x4AA0 US_ALU_ALPHA_INST_56
-+0x4AA4 US_ALU_ALPHA_INST_57
-+0x4AA8 US_ALU_ALPHA_INST_58
-+0x4AAC US_ALU_ALPHA_INST_59
-+0x4AB0 US_ALU_ALPHA_INST_60
-+0x4AB4 US_ALU_ALPHA_INST_61
-+0x4AB8 US_ALU_ALPHA_INST_62
-+0x4ABC US_ALU_ALPHA_INST_63
-+0x4BC0 FG_FOG_BLEND
-+0x4BC4 FG_FOG_FACTOR
-+0x4BC8 FG_FOG_COLOR_R
-+0x4BCC FG_FOG_COLOR_G
-+0x4BD0 FG_FOG_COLOR_B
-+0x4BD4 FG_ALPHA_FUNC
-+0x4BD8 FG_DEPTH_SRC
-+0x4C00 US_ALU_CONST_R_0
-+0x4C04 US_ALU_CONST_G_0
-+0x4C08 US_ALU_CONST_B_0
-+0x4C0C US_ALU_CONST_A_0
-+0x4C10 US_ALU_CONST_R_1
-+0x4C14 US_ALU_CONST_G_1
-+0x4C18 US_ALU_CONST_B_1
-+0x4C1C US_ALU_CONST_A_1
-+0x4C20 US_ALU_CONST_R_2
-+0x4C24 US_ALU_CONST_G_2
-+0x4C28 US_ALU_CONST_B_2
-+0x4C2C US_ALU_CONST_A_2
-+0x4C30 US_ALU_CONST_R_3
-+0x4C34 US_ALU_CONST_G_3
-+0x4C38 US_ALU_CONST_B_3
-+0x4C3C US_ALU_CONST_A_3
-+0x4C40 US_ALU_CONST_R_4
-+0x4C44 US_ALU_CONST_G_4
-+0x4C48 US_ALU_CONST_B_4
-+0x4C4C US_ALU_CONST_A_4
-+0x4C50 US_ALU_CONST_R_5
-+0x4C54 US_ALU_CONST_G_5
-+0x4C58 US_ALU_CONST_B_5
-+0x4C5C US_ALU_CONST_A_5
-+0x4C60 US_ALU_CONST_R_6
-+0x4C64 US_ALU_CONST_G_6
-+0x4C68 US_ALU_CONST_B_6
-+0x4C6C US_ALU_CONST_A_6
-+0x4C70 US_ALU_CONST_R_7
-+0x4C74 US_ALU_CONST_G_7
-+0x4C78 US_ALU_CONST_B_7
-+0x4C7C US_ALU_CONST_A_7
-+0x4C80 US_ALU_CONST_R_8
-+0x4C84 US_ALU_CONST_G_8
-+0x4C88 US_ALU_CONST_B_8
-+0x4C8C US_ALU_CONST_A_8
-+0x4C90 US_ALU_CONST_R_9
-+0x4C94 US_ALU_CONST_G_9
-+0x4C98 US_ALU_CONST_B_9
-+0x4C9C US_ALU_CONST_A_9
-+0x4CA0 US_ALU_CONST_R_10
-+0x4CA4 US_ALU_CONST_G_10
-+0x4CA8 US_ALU_CONST_B_10
-+0x4CAC US_ALU_CONST_A_10
-+0x4CB0 US_ALU_CONST_R_11
-+0x4CB4 US_ALU_CONST_G_11
-+0x4CB8 US_ALU_CONST_B_11
-+0x4CBC US_ALU_CONST_A_11
-+0x4CC0 US_ALU_CONST_R_12
-+0x4CC4 US_ALU_CONST_G_12
-+0x4CC8 US_ALU_CONST_B_12
-+0x4CCC US_ALU_CONST_A_12
-+0x4CD0 US_ALU_CONST_R_13
-+0x4CD4 US_ALU_CONST_G_13
-+0x4CD8 US_ALU_CONST_B_13
-+0x4CDC US_ALU_CONST_A_13
-+0x4CE0 US_ALU_CONST_R_14
-+0x4CE4 US_ALU_CONST_G_14
-+0x4CE8 US_ALU_CONST_B_14
-+0x4CEC US_ALU_CONST_A_14
-+0x4CF0 US_ALU_CONST_R_15
-+0x4CF4 US_ALU_CONST_G_15
-+0x4CF8 US_ALU_CONST_B_15
-+0x4CFC US_ALU_CONST_A_15
-+0x4D00 US_ALU_CONST_R_16
-+0x4D04 US_ALU_CONST_G_16
-+0x4D08 US_ALU_CONST_B_16
-+0x4D0C US_ALU_CONST_A_16
-+0x4D10 US_ALU_CONST_R_17
-+0x4D14 US_ALU_CONST_G_17
-+0x4D18 US_ALU_CONST_B_17
-+0x4D1C US_ALU_CONST_A_17
-+0x4D20 US_ALU_CONST_R_18
-+0x4D24 US_ALU_CONST_G_18
-+0x4D28 US_ALU_CONST_B_18
-+0x4D2C US_ALU_CONST_A_18
-+0x4D30 US_ALU_CONST_R_19
-+0x4D34 US_ALU_CONST_G_19
-+0x4D38 US_ALU_CONST_B_19
-+0x4D3C US_ALU_CONST_A_19
-+0x4D40 US_ALU_CONST_R_20
-+0x4D44 US_ALU_CONST_G_20
-+0x4D48 US_ALU_CONST_B_20
-+0x4D4C US_ALU_CONST_A_20
-+0x4D50 US_ALU_CONST_R_21
-+0x4D54 US_ALU_CONST_G_21
-+0x4D58 US_ALU_CONST_B_21
-+0x4D5C US_ALU_CONST_A_21
-+0x4D60 US_ALU_CONST_R_22
-+0x4D64 US_ALU_CONST_G_22
-+0x4D68 US_ALU_CONST_B_22
-+0x4D6C US_ALU_CONST_A_22
-+0x4D70 US_ALU_CONST_R_23
-+0x4D74 US_ALU_CONST_G_23
-+0x4D78 US_ALU_CONST_B_23
-+0x4D7C US_ALU_CONST_A_23
-+0x4D80 US_ALU_CONST_R_24
-+0x4D84 US_ALU_CONST_G_24
-+0x4D88 US_ALU_CONST_B_24
-+0x4D8C US_ALU_CONST_A_24
-+0x4D90 US_ALU_CONST_R_25
-+0x4D94 US_ALU_CONST_G_25
-+0x4D98 US_ALU_CONST_B_25
-+0x4D9C US_ALU_CONST_A_25
-+0x4DA0 US_ALU_CONST_R_26
-+0x4DA4 US_ALU_CONST_G_26
-+0x4DA8 US_ALU_CONST_B_26
-+0x4DAC US_ALU_CONST_A_26
-+0x4DB0 US_ALU_CONST_R_27
-+0x4DB4 US_ALU_CONST_G_27
-+0x4DB8 US_ALU_CONST_B_27
-+0x4DBC US_ALU_CONST_A_27
-+0x4DC0 US_ALU_CONST_R_28
-+0x4DC4 US_ALU_CONST_G_28
-+0x4DC8 US_ALU_CONST_B_28
-+0x4DCC US_ALU_CONST_A_28
-+0x4DD0 US_ALU_CONST_R_29
-+0x4DD4 US_ALU_CONST_G_29
-+0x4DD8 US_ALU_CONST_B_29
-+0x4DDC US_ALU_CONST_A_29
-+0x4DE0 US_ALU_CONST_R_30
-+0x4DE4 US_ALU_CONST_G_30
-+0x4DE8 US_ALU_CONST_B_30
-+0x4DEC US_ALU_CONST_A_30
-+0x4DF0 US_ALU_CONST_R_31
-+0x4DF4 US_ALU_CONST_G_31
-+0x4DF8 US_ALU_CONST_B_31
-+0x4DFC US_ALU_CONST_A_31
-+0x4E04 RB3D_BLENDCNTL_R3
-+0x4E08 RB3D_ABLENDCNTL_R3
-+0x4E0C RB3D_COLOR_CHANNEL_MASK
-+0x4E10 RB3D_CONSTANT_COLOR
-+0x4E14 RB3D_COLOR_CLEAR_VALUE
-+0x4E18 RB3D_ROPCNTL_R3
-+0x4E1C RB3D_CLRCMP_FLIPE_R3
-+0x4E20 RB3D_CLRCMP_CLR_R3
-+0x4E24 RB3D_CLRCMP_MSK_R3
-+0x4E48 RB3D_DEBUG_CTL
-+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
-+0x4E50 RB3D_DITHER_CTL
-+0x4E54 RB3D_CMASK_OFFSET0
-+0x4E58 RB3D_CMASK_OFFSET1
-+0x4E5C RB3D_CMASK_OFFSET2
-+0x4E60 RB3D_CMASK_OFFSET3
-+0x4E64 RB3D_CMASK_PITCH0
-+0x4E68 RB3D_CMASK_PITCH1
-+0x4E6C RB3D_CMASK_PITCH2
-+0x4E70 RB3D_CMASK_PITCH3
-+0x4E74 RB3D_CMASK_WRINDEX
-+0x4E78 RB3D_CMASK_DWORD
-+0x4E7C RB3D_CMASK_RDINDEX
-+0x4E80 RB3D_AARESOLVE_OFFSET
-+0x4E84 RB3D_AARESOLVE_PITCH
-+0x4E88 RB3D_AARESOLVE_CTL
-+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
-+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
-+0x4F04 ZB_ZSTENCILCNTL
-+0x4F08 ZB_STENCILREFMASK
-+0x4F14 ZB_ZTOP
-+0x4F18 ZB_ZCACHE_CTLSTAT
-+0x4F1C ZB_BW_CNTL
-+0x4F28 ZB_DEPTHCLEARVALUE
-+0x4F30 ZB_ZMASK_OFFSET
-+0x4F34 ZB_ZMASK_PITCH
-+0x4F38 ZB_ZMASK_WRINDEX
-+0x4F3C ZB_ZMASK_DWORD
-+0x4F40 ZB_ZMASK_RDINDEX
-+0x4F44 ZB_HIZ_OFFSET
-+0x4F48 ZB_HIZ_WRINDEX
-+0x4F4C ZB_HIZ_DWORD
-+0x4F50 ZB_HIZ_RDINDEX
-+0x4F54 ZB_HIZ_PITCH
-+0x4F58 ZB_ZPASS_DATA
-diff --git a/drivers/gpu/drm/radeon/reg_srcs/rn50 b/drivers/gpu/drm/radeon/reg_srcs/rn50
-new file mode 100644
-index 0000000..2687b63
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/reg_srcs/rn50
-@@ -0,0 +1,30 @@
-+rn50 0x3294
-+0x1434 SRC_Y_X
-+0x1438 DST_Y_X
-+0x143C DST_HEIGHT_WIDTH
-+0x146C DP_GUI_MASTER_CNTL
-+0x1474 BRUSH_Y_X
-+0x1478 DP_BRUSH_BKGD_CLR
-+0x147C DP_BRUSH_FRGD_CLR
-+0x1480 BRUSH_DATA0
-+0x1484 BRUSH_DATA1
-+0x1598 DST_WIDTH_HEIGHT
-+0x15C0 CLR_CMP_CNTL
-+0x15C4 CLR_CMP_CLR_SRC
-+0x15C8 CLR_CMP_CLR_DST
-+0x15CC CLR_CMP_MSK
-+0x15D8 DP_SRC_FRGD_CLR
-+0x15DC DP_SRC_BKGD_CLR
-+0x1600 DST_LINE_START
-+0x1604 DST_LINE_END
-+0x1608 DST_LINE_PATCOUNT
-+0x16C0 DP_CNTL
-+0x16CC DP_WRITE_MSK
-+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
-+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
-+0x16EC SC_TOP_LEFT
-+0x16F0 SC_BOTTOM_RIGHT
-+0x16F4 SRC_SC_BOTTOM_RIGHT
-+0x1714 DSTCACHE_CTLSTAT
-+0x1720 WAIT_UNTIL
-+0x172C RBBM_GUICNTL
-diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
-new file mode 100644
-index 0000000..8e3c0b8
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
-@@ -0,0 +1,729 @@
-+rs600 0x6d40
-+0x1434 SRC_Y_X
-+0x1438 DST_Y_X
-+0x143C DST_HEIGHT_WIDTH
-+0x146C DP_GUI_MASTER_CNTL
-+0x1474 BRUSH_Y_X
-+0x1478 DP_BRUSH_BKGD_CLR
-+0x147C DP_BRUSH_FRGD_CLR
-+0x1480 BRUSH_DATA0
-+0x1484 BRUSH_DATA1
-+0x1598 DST_WIDTH_HEIGHT
-+0x15C0 CLR_CMP_CNTL
-+0x15C4 CLR_CMP_CLR_SRC
-+0x15C8 CLR_CMP_CLR_DST
-+0x15CC CLR_CMP_MSK
-+0x15D8 DP_SRC_FRGD_CLR
-+0x15DC DP_SRC_BKGD_CLR
-+0x1600 DST_LINE_START
-+0x1604 DST_LINE_END
-+0x1608 DST_LINE_PATCOUNT
-+0x16C0 DP_CNTL
-+0x16CC DP_WRITE_MSK
-+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
-+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
-+0x16EC SC_TOP_LEFT
-+0x16F0 SC_BOTTOM_RIGHT
-+0x16F4 SRC_SC_BOTTOM_RIGHT
-+0x1714 DSTCACHE_CTLSTAT
-+0x1720 WAIT_UNTIL
-+0x172C RBBM_GUICNTL
-+0x1D98 VAP_VPORT_XSCALE
-+0x1D9C VAP_VPORT_XOFFSET
-+0x1DA0 VAP_VPORT_YSCALE
-+0x1DA4 VAP_VPORT_YOFFSET
-+0x1DA8 VAP_VPORT_ZSCALE
-+0x1DAC VAP_VPORT_ZOFFSET
-+0x2080 VAP_CNTL
-+0x2090 VAP_OUT_VTX_FMT_0
-+0x2094 VAP_OUT_VTX_FMT_1
-+0x20B0 VAP_VTE_CNTL
-+0x2138 VAP_VF_MIN_VTX_INDX
-+0x2140 VAP_CNTL_STATUS
-+0x2150 VAP_PROG_STREAM_CNTL_0
-+0x2154 VAP_PROG_STREAM_CNTL_1
-+0x2158 VAP_PROG_STREAM_CNTL_2
-+0x215C VAP_PROG_STREAM_CNTL_3
-+0x2160 VAP_PROG_STREAM_CNTL_4
-+0x2164 VAP_PROG_STREAM_CNTL_5
-+0x2168 VAP_PROG_STREAM_CNTL_6
-+0x216C VAP_PROG_STREAM_CNTL_7
-+0x2180 VAP_VTX_STATE_CNTL
-+0x2184 VAP_VSM_VTX_ASSM
-+0x2188 VAP_VTX_STATE_IND_REG_0
-+0x218C VAP_VTX_STATE_IND_REG_1
-+0x2190 VAP_VTX_STATE_IND_REG_2
-+0x2194 VAP_VTX_STATE_IND_REG_3
-+0x2198 VAP_VTX_STATE_IND_REG_4
-+0x219C VAP_VTX_STATE_IND_REG_5
-+0x21A0 VAP_VTX_STATE_IND_REG_6
-+0x21A4 VAP_VTX_STATE_IND_REG_7
-+0x21A8 VAP_VTX_STATE_IND_REG_8
-+0x21AC VAP_VTX_STATE_IND_REG_9
-+0x21B0 VAP_VTX_STATE_IND_REG_10
-+0x21B4 VAP_VTX_STATE_IND_REG_11
-+0x21B8 VAP_VTX_STATE_IND_REG_12
-+0x21BC VAP_VTX_STATE_IND_REG_13
-+0x21C0 VAP_VTX_STATE_IND_REG_14
-+0x21C4 VAP_VTX_STATE_IND_REG_15
-+0x21DC VAP_PSC_SGN_NORM_CNTL
-+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
-+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
-+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
-+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
-+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
-+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
-+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
-+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
-+0x2200 VAP_PVS_VECTOR_INDX_REG
-+0x2204 VAP_PVS_VECTOR_DATA_REG
-+0x2208 VAP_PVS_VECTOR_DATA_REG_128
-+0x221C VAP_CLIP_CNTL
-+0x2220 VAP_GB_VERT_CLIP_ADJ
-+0x2224 VAP_GB_VERT_DISC_ADJ
-+0x2228 VAP_GB_HORZ_CLIP_ADJ
-+0x222C VAP_GB_HORZ_DISC_ADJ
-+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
-+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
-+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
-+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
-+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
-+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
-+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
-+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
-+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
-+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
-+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
-+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
-+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
-+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
-+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
-+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
-+0x2284 VAP_PVS_STATE_FLUSH_REG
-+0x2288 VAP_PVS_VTX_TIMEOUT_REG
-+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
-+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
-+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
-+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
-+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
-+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
-+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
-+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
-+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
-+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
-+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
-+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
-+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
-+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
-+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
-+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
-+0x22D0 VAP_PVS_CODE_CNTL_0
-+0x22D4 VAP_PVS_CONST_CNTL
-+0x22D8 VAP_PVS_CODE_CNTL_1
-+0x22DC VAP_PVS_FLOW_CNTL_OPC
-+0x342C RB2D_DSTCACHE_CTLSTAT
-+0x4000 GB_VAP_RASTER_VTX_FMT_0
-+0x4004 GB_VAP_RASTER_VTX_FMT_1
-+0x4008 GB_ENABLE
-+0x401C GB_SELECT
-+0x4020 GB_AA_CONFIG
-+0x4024 GB_FIFO_SIZE
-+0x4100 TX_INVALTAGS
-+0x4200 GA_POINT_S0
-+0x4204 GA_POINT_T0
-+0x4208 GA_POINT_S1
-+0x420C GA_POINT_T1
-+0x4214 GA_TRIANGLE_STIPPLE
-+0x421C GA_POINT_SIZE
-+0x4230 GA_POINT_MINMAX
-+0x4234 GA_LINE_CNTL
-+0x4238 GA_LINE_STIPPLE_CONFIG
-+0x4260 GA_LINE_STIPPLE_VALUE
-+0x4264 GA_LINE_S0
-+0x4268 GA_LINE_S1
-+0x4278 GA_COLOR_CONTROL
-+0x427C GA_SOLID_RG
-+0x4280 GA_SOLID_BA
-+0x4288 GA_POLY_MODE
-+0x428C GA_ROUND_MODE
-+0x4290 GA_OFFSET
-+0x4294 GA_FOG_SCALE
-+0x4298 GA_FOG_OFFSET
-+0x42A0 SU_TEX_WRAP
-+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
-+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
-+0x42AC SU_POLY_OFFSET_BACK_SCALE
-+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
-+0x42B4 SU_POLY_OFFSET_ENABLE
-+0x42B8 SU_CULL_MODE
-+0x42C0 SU_DEPTH_SCALE
-+0x42C4 SU_DEPTH_OFFSET
-+0x42C8 SU_REG_DEST
-+0x4300 RS_COUNT
-+0x4304 RS_INST_COUNT
-+0x4310 RS_IP_0
-+0x4314 RS_IP_1
-+0x4318 RS_IP_2
-+0x431C RS_IP_3
-+0x4320 RS_IP_4
-+0x4324 RS_IP_5
-+0x4328 RS_IP_6
-+0x432C RS_IP_7
-+0x4330 RS_INST_0
-+0x4334 RS_INST_1
-+0x4338 RS_INST_2
-+0x433C RS_INST_3
-+0x4340 RS_INST_4
-+0x4344 RS_INST_5
-+0x4348 RS_INST_6
-+0x434C RS_INST_7
-+0x4350 RS_INST_8
-+0x4354 RS_INST_9
-+0x4358 RS_INST_10
-+0x435C RS_INST_11
-+0x4360 RS_INST_12
-+0x4364 RS_INST_13
-+0x4368 RS_INST_14
-+0x436C RS_INST_15
-+0x43A4 SC_HYPERZ_EN
-+0x43A8 SC_EDGERULE
-+0x43B0 SC_CLIP_0_A
-+0x43B4 SC_CLIP_0_B
-+0x43B8 SC_CLIP_1_A
-+0x43BC SC_CLIP_1_B
-+0x43C0 SC_CLIP_2_A
-+0x43C4 SC_CLIP_2_B
-+0x43C8 SC_CLIP_3_A
-+0x43CC SC_CLIP_3_B
-+0x43D0 SC_CLIP_RULE
-+0x43E0 SC_SCISSOR0
-+0x43E8 SC_SCREENDOOR
-+0x4440 TX_FILTER1_0
-+0x4444 TX_FILTER1_1
-+0x4448 TX_FILTER1_2
-+0x444C TX_FILTER1_3
-+0x4450 TX_FILTER1_4
-+0x4454 TX_FILTER1_5
-+0x4458 TX_FILTER1_6
-+0x445C TX_FILTER1_7
-+0x4460 TX_FILTER1_8
-+0x4464 TX_FILTER1_9
-+0x4468 TX_FILTER1_10
-+0x446C TX_FILTER1_11
-+0x4470 TX_FILTER1_12
-+0x4474 TX_FILTER1_13
-+0x4478 TX_FILTER1_14
-+0x447C TX_FILTER1_15
-+0x4580 TX_CHROMA_KEY_0
-+0x4584 TX_CHROMA_KEY_1
-+0x4588 TX_CHROMA_KEY_2
-+0x458C TX_CHROMA_KEY_3
-+0x4590 TX_CHROMA_KEY_4
-+0x4594 TX_CHROMA_KEY_5
-+0x4598 TX_CHROMA_KEY_6
-+0x459C TX_CHROMA_KEY_7
-+0x45A0 TX_CHROMA_KEY_8
-+0x45A4 TX_CHROMA_KEY_9
-+0x45A8 TX_CHROMA_KEY_10
-+0x45AC TX_CHROMA_KEY_11
-+0x45B0 TX_CHROMA_KEY_12
-+0x45B4 TX_CHROMA_KEY_13
-+0x45B8 TX_CHROMA_KEY_14
-+0x45BC TX_CHROMA_KEY_15
-+0x45C0 TX_BORDER_COLOR_0
-+0x45C4 TX_BORDER_COLOR_1
-+0x45C8 TX_BORDER_COLOR_2
-+0x45CC TX_BORDER_COLOR_3
-+0x45D0 TX_BORDER_COLOR_4
-+0x45D4 TX_BORDER_COLOR_5
-+0x45D8 TX_BORDER_COLOR_6
-+0x45DC TX_BORDER_COLOR_7
-+0x45E0 TX_BORDER_COLOR_8
-+0x45E4 TX_BORDER_COLOR_9
-+0x45E8 TX_BORDER_COLOR_10
-+0x45EC TX_BORDER_COLOR_11
-+0x45F0 TX_BORDER_COLOR_12
-+0x45F4 TX_BORDER_COLOR_13
-+0x45F8 TX_BORDER_COLOR_14
-+0x45FC TX_BORDER_COLOR_15
-+0x4600 US_CONFIG
-+0x4604 US_PIXSIZE
-+0x4608 US_CODE_OFFSET
-+0x460C US_RESET
-+0x4610 US_CODE_ADDR_0
-+0x4614 US_CODE_ADDR_1
-+0x4618 US_CODE_ADDR_2
-+0x461C US_CODE_ADDR_3
-+0x4620 US_TEX_INST_0
-+0x4624 US_TEX_INST_1
-+0x4628 US_TEX_INST_2
-+0x462C US_TEX_INST_3
-+0x4630 US_TEX_INST_4
-+0x4634 US_TEX_INST_5
-+0x4638 US_TEX_INST_6
-+0x463C US_TEX_INST_7
-+0x4640 US_TEX_INST_8
-+0x4644 US_TEX_INST_9
-+0x4648 US_TEX_INST_10
-+0x464C US_TEX_INST_11
-+0x4650 US_TEX_INST_12
-+0x4654 US_TEX_INST_13
-+0x4658 US_TEX_INST_14
-+0x465C US_TEX_INST_15
-+0x4660 US_TEX_INST_16
-+0x4664 US_TEX_INST_17
-+0x4668 US_TEX_INST_18
-+0x466C US_TEX_INST_19
-+0x4670 US_TEX_INST_20
-+0x4674 US_TEX_INST_21
-+0x4678 US_TEX_INST_22
-+0x467C US_TEX_INST_23
-+0x4680 US_TEX_INST_24
-+0x4684 US_TEX_INST_25
-+0x4688 US_TEX_INST_26
-+0x468C US_TEX_INST_27
-+0x4690 US_TEX_INST_28
-+0x4694 US_TEX_INST_29
-+0x4698 US_TEX_INST_30
-+0x469C US_TEX_INST_31
-+0x46A4 US_OUT_FMT_0
-+0x46A8 US_OUT_FMT_1
-+0x46AC US_OUT_FMT_2
-+0x46B0 US_OUT_FMT_3
-+0x46B4 US_W_FMT
-+0x46C0 US_ALU_RGB_ADDR_0
-+0x46C4 US_ALU_RGB_ADDR_1
-+0x46C8 US_ALU_RGB_ADDR_2
-+0x46CC US_ALU_RGB_ADDR_3
-+0x46D0 US_ALU_RGB_ADDR_4
-+0x46D4 US_ALU_RGB_ADDR_5
-+0x46D8 US_ALU_RGB_ADDR_6
-+0x46DC US_ALU_RGB_ADDR_7
-+0x46E0 US_ALU_RGB_ADDR_8
-+0x46E4 US_ALU_RGB_ADDR_9
-+0x46E8 US_ALU_RGB_ADDR_10
-+0x46EC US_ALU_RGB_ADDR_11
-+0x46F0 US_ALU_RGB_ADDR_12
-+0x46F4 US_ALU_RGB_ADDR_13
-+0x46F8 US_ALU_RGB_ADDR_14
-+0x46FC US_ALU_RGB_ADDR_15
-+0x4700 US_ALU_RGB_ADDR_16
-+0x4704 US_ALU_RGB_ADDR_17
-+0x4708 US_ALU_RGB_ADDR_18
-+0x470C US_ALU_RGB_ADDR_19
-+0x4710 US_ALU_RGB_ADDR_20
-+0x4714 US_ALU_RGB_ADDR_21
-+0x4718 US_ALU_RGB_ADDR_22
-+0x471C US_ALU_RGB_ADDR_23
-+0x4720 US_ALU_RGB_ADDR_24
-+0x4724 US_ALU_RGB_ADDR_25
-+0x4728 US_ALU_RGB_ADDR_26
-+0x472C US_ALU_RGB_ADDR_27
-+0x4730 US_ALU_RGB_ADDR_28
-+0x4734 US_ALU_RGB_ADDR_29
-+0x4738 US_ALU_RGB_ADDR_30
-+0x473C US_ALU_RGB_ADDR_31
-+0x4740 US_ALU_RGB_ADDR_32
-+0x4744 US_ALU_RGB_ADDR_33
-+0x4748 US_ALU_RGB_ADDR_34
-+0x474C US_ALU_RGB_ADDR_35
-+0x4750 US_ALU_RGB_ADDR_36
-+0x4754 US_ALU_RGB_ADDR_37
-+0x4758 US_ALU_RGB_ADDR_38
-+0x475C US_ALU_RGB_ADDR_39
-+0x4760 US_ALU_RGB_ADDR_40
-+0x4764 US_ALU_RGB_ADDR_41
-+0x4768 US_ALU_RGB_ADDR_42
-+0x476C US_ALU_RGB_ADDR_43
-+0x4770 US_ALU_RGB_ADDR_44
-+0x4774 US_ALU_RGB_ADDR_45
-+0x4778 US_ALU_RGB_ADDR_46
-+0x477C US_ALU_RGB_ADDR_47
-+0x4780 US_ALU_RGB_ADDR_48
-+0x4784 US_ALU_RGB_ADDR_49
-+0x4788 US_ALU_RGB_ADDR_50
-+0x478C US_ALU_RGB_ADDR_51
-+0x4790 US_ALU_RGB_ADDR_52
-+0x4794 US_ALU_RGB_ADDR_53
-+0x4798 US_ALU_RGB_ADDR_54
-+0x479C US_ALU_RGB_ADDR_55
-+0x47A0 US_ALU_RGB_ADDR_56
-+0x47A4 US_ALU_RGB_ADDR_57
-+0x47A8 US_ALU_RGB_ADDR_58
-+0x47AC US_ALU_RGB_ADDR_59
-+0x47B0 US_ALU_RGB_ADDR_60
-+0x47B4 US_ALU_RGB_ADDR_61
-+0x47B8 US_ALU_RGB_ADDR_62
-+0x47BC US_ALU_RGB_ADDR_63
-+0x47C0 US_ALU_ALPHA_ADDR_0
-+0x47C4 US_ALU_ALPHA_ADDR_1
-+0x47C8 US_ALU_ALPHA_ADDR_2
-+0x47CC US_ALU_ALPHA_ADDR_3
-+0x47D0 US_ALU_ALPHA_ADDR_4
-+0x47D4 US_ALU_ALPHA_ADDR_5
-+0x47D8 US_ALU_ALPHA_ADDR_6
-+0x47DC US_ALU_ALPHA_ADDR_7
-+0x47E0 US_ALU_ALPHA_ADDR_8
-+0x47E4 US_ALU_ALPHA_ADDR_9
-+0x47E8 US_ALU_ALPHA_ADDR_10
-+0x47EC US_ALU_ALPHA_ADDR_11
-+0x47F0 US_ALU_ALPHA_ADDR_12
-+0x47F4 US_ALU_ALPHA_ADDR_13
-+0x47F8 US_ALU_ALPHA_ADDR_14
-+0x47FC US_ALU_ALPHA_ADDR_15
-+0x4800 US_ALU_ALPHA_ADDR_16
-+0x4804 US_ALU_ALPHA_ADDR_17
-+0x4808 US_ALU_ALPHA_ADDR_18
-+0x480C US_ALU_ALPHA_ADDR_19
-+0x4810 US_ALU_ALPHA_ADDR_20
-+0x4814 US_ALU_ALPHA_ADDR_21
-+0x4818 US_ALU_ALPHA_ADDR_22
-+0x481C US_ALU_ALPHA_ADDR_23
-+0x4820 US_ALU_ALPHA_ADDR_24
-+0x4824 US_ALU_ALPHA_ADDR_25
-+0x4828 US_ALU_ALPHA_ADDR_26
-+0x482C US_ALU_ALPHA_ADDR_27
-+0x4830 US_ALU_ALPHA_ADDR_28
-+0x4834 US_ALU_ALPHA_ADDR_29
-+0x4838 US_ALU_ALPHA_ADDR_30
-+0x483C US_ALU_ALPHA_ADDR_31
-+0x4840 US_ALU_ALPHA_ADDR_32
-+0x4844 US_ALU_ALPHA_ADDR_33
-+0x4848 US_ALU_ALPHA_ADDR_34
-+0x484C US_ALU_ALPHA_ADDR_35
-+0x4850 US_ALU_ALPHA_ADDR_36
-+0x4854 US_ALU_ALPHA_ADDR_37
-+0x4858 US_ALU_ALPHA_ADDR_38
-+0x485C US_ALU_ALPHA_ADDR_39
-+0x4860 US_ALU_ALPHA_ADDR_40
-+0x4864 US_ALU_ALPHA_ADDR_41
-+0x4868 US_ALU_ALPHA_ADDR_42
-+0x486C US_ALU_ALPHA_ADDR_43
-+0x4870 US_ALU_ALPHA_ADDR_44
-+0x4874 US_ALU_ALPHA_ADDR_45
-+0x4878 US_ALU_ALPHA_ADDR_46
-+0x487C US_ALU_ALPHA_ADDR_47
-+0x4880 US_ALU_ALPHA_ADDR_48
-+0x4884 US_ALU_ALPHA_ADDR_49
-+0x4888 US_ALU_ALPHA_ADDR_50
-+0x488C US_ALU_ALPHA_ADDR_51
-+0x4890 US_ALU_ALPHA_ADDR_52
-+0x4894 US_ALU_ALPHA_ADDR_53
-+0x4898 US_ALU_ALPHA_ADDR_54
-+0x489C US_ALU_ALPHA_ADDR_55
-+0x48A0 US_ALU_ALPHA_ADDR_56
-+0x48A4 US_ALU_ALPHA_ADDR_57
-+0x48A8 US_ALU_ALPHA_ADDR_58
-+0x48AC US_ALU_ALPHA_ADDR_59
-+0x48B0 US_ALU_ALPHA_ADDR_60
-+0x48B4 US_ALU_ALPHA_ADDR_61
-+0x48B8 US_ALU_ALPHA_ADDR_62
-+0x48BC US_ALU_ALPHA_ADDR_63
-+0x48C0 US_ALU_RGB_INST_0
-+0x48C4 US_ALU_RGB_INST_1
-+0x48C8 US_ALU_RGB_INST_2
-+0x48CC US_ALU_RGB_INST_3
-+0x48D0 US_ALU_RGB_INST_4
-+0x48D4 US_ALU_RGB_INST_5
-+0x48D8 US_ALU_RGB_INST_6
-+0x48DC US_ALU_RGB_INST_7
-+0x48E0 US_ALU_RGB_INST_8
-+0x48E4 US_ALU_RGB_INST_9
-+0x48E8 US_ALU_RGB_INST_10
-+0x48EC US_ALU_RGB_INST_11
-+0x48F0 US_ALU_RGB_INST_12
-+0x48F4 US_ALU_RGB_INST_13
-+0x48F8 US_ALU_RGB_INST_14
-+0x48FC US_ALU_RGB_INST_15
-+0x4900 US_ALU_RGB_INST_16
-+0x4904 US_ALU_RGB_INST_17
-+0x4908 US_ALU_RGB_INST_18
-+0x490C US_ALU_RGB_INST_19
-+0x4910 US_ALU_RGB_INST_20
-+0x4914 US_ALU_RGB_INST_21
-+0x4918 US_ALU_RGB_INST_22
-+0x491C US_ALU_RGB_INST_23
-+0x4920 US_ALU_RGB_INST_24
-+0x4924 US_ALU_RGB_INST_25
-+0x4928 US_ALU_RGB_INST_26
-+0x492C US_ALU_RGB_INST_27
-+0x4930 US_ALU_RGB_INST_28
-+0x4934 US_ALU_RGB_INST_29
-+0x4938 US_ALU_RGB_INST_30
-+0x493C US_ALU_RGB_INST_31
-+0x4940 US_ALU_RGB_INST_32
-+0x4944 US_ALU_RGB_INST_33
-+0x4948 US_ALU_RGB_INST_34
-+0x494C US_ALU_RGB_INST_35
-+0x4950 US_ALU_RGB_INST_36
-+0x4954 US_ALU_RGB_INST_37
-+0x4958 US_ALU_RGB_INST_38
-+0x495C US_ALU_RGB_INST_39
-+0x4960 US_ALU_RGB_INST_40
-+0x4964 US_ALU_RGB_INST_41
-+0x4968 US_ALU_RGB_INST_42
-+0x496C US_ALU_RGB_INST_43
-+0x4970 US_ALU_RGB_INST_44
-+0x4974 US_ALU_RGB_INST_45
-+0x4978 US_ALU_RGB_INST_46
-+0x497C US_ALU_RGB_INST_47
-+0x4980 US_ALU_RGB_INST_48
-+0x4984 US_ALU_RGB_INST_49
-+0x4988 US_ALU_RGB_INST_50
-+0x498C US_ALU_RGB_INST_51
-+0x4990 US_ALU_RGB_INST_52
-+0x4994 US_ALU_RGB_INST_53
-+0x4998 US_ALU_RGB_INST_54
-+0x499C US_ALU_RGB_INST_55
-+0x49A0 US_ALU_RGB_INST_56
-+0x49A4 US_ALU_RGB_INST_57
-+0x49A8 US_ALU_RGB_INST_58
-+0x49AC US_ALU_RGB_INST_59
-+0x49B0 US_ALU_RGB_INST_60
-+0x49B4 US_ALU_RGB_INST_61
-+0x49B8 US_ALU_RGB_INST_62
-+0x49BC US_ALU_RGB_INST_63
-+0x49C0 US_ALU_ALPHA_INST_0
-+0x49C4 US_ALU_ALPHA_INST_1
-+0x49C8 US_ALU_ALPHA_INST_2
-+0x49CC US_ALU_ALPHA_INST_3
-+0x49D0 US_ALU_ALPHA_INST_4
-+0x49D4 US_ALU_ALPHA_INST_5
-+0x49D8 US_ALU_ALPHA_INST_6
-+0x49DC US_ALU_ALPHA_INST_7
-+0x49E0 US_ALU_ALPHA_INST_8
-+0x49E4 US_ALU_ALPHA_INST_9
-+0x49E8 US_ALU_ALPHA_INST_10
-+0x49EC US_ALU_ALPHA_INST_11
-+0x49F0 US_ALU_ALPHA_INST_12
-+0x49F4 US_ALU_ALPHA_INST_13
-+0x49F8 US_ALU_ALPHA_INST_14
-+0x49FC US_ALU_ALPHA_INST_15
-+0x4A00 US_ALU_ALPHA_INST_16
-+0x4A04 US_ALU_ALPHA_INST_17
-+0x4A08 US_ALU_ALPHA_INST_18
-+0x4A0C US_ALU_ALPHA_INST_19
-+0x4A10 US_ALU_ALPHA_INST_20
-+0x4A14 US_ALU_ALPHA_INST_21
-+0x4A18 US_ALU_ALPHA_INST_22
-+0x4A1C US_ALU_ALPHA_INST_23
-+0x4A20 US_ALU_ALPHA_INST_24
-+0x4A24 US_ALU_ALPHA_INST_25
-+0x4A28 US_ALU_ALPHA_INST_26
-+0x4A2C US_ALU_ALPHA_INST_27
-+0x4A30 US_ALU_ALPHA_INST_28
-+0x4A34 US_ALU_ALPHA_INST_29
-+0x4A38 US_ALU_ALPHA_INST_30
-+0x4A3C US_ALU_ALPHA_INST_31
-+0x4A40 US_ALU_ALPHA_INST_32
-+0x4A44 US_ALU_ALPHA_INST_33
-+0x4A48 US_ALU_ALPHA_INST_34
-+0x4A4C US_ALU_ALPHA_INST_35
-+0x4A50 US_ALU_ALPHA_INST_36
-+0x4A54 US_ALU_ALPHA_INST_37
-+0x4A58 US_ALU_ALPHA_INST_38
-+0x4A5C US_ALU_ALPHA_INST_39
-+0x4A60 US_ALU_ALPHA_INST_40
-+0x4A64 US_ALU_ALPHA_INST_41
-+0x4A68 US_ALU_ALPHA_INST_42
-+0x4A6C US_ALU_ALPHA_INST_43
-+0x4A70 US_ALU_ALPHA_INST_44
-+0x4A74 US_ALU_ALPHA_INST_45
-+0x4A78 US_ALU_ALPHA_INST_46
-+0x4A7C US_ALU_ALPHA_INST_47
-+0x4A80 US_ALU_ALPHA_INST_48
-+0x4A84 US_ALU_ALPHA_INST_49
-+0x4A88 US_ALU_ALPHA_INST_50
-+0x4A8C US_ALU_ALPHA_INST_51
-+0x4A90 US_ALU_ALPHA_INST_52
-+0x4A94 US_ALU_ALPHA_INST_53
-+0x4A98 US_ALU_ALPHA_INST_54
-+0x4A9C US_ALU_ALPHA_INST_55
-+0x4AA0 US_ALU_ALPHA_INST_56
-+0x4AA4 US_ALU_ALPHA_INST_57
-+0x4AA8 US_ALU_ALPHA_INST_58
-+0x4AAC US_ALU_ALPHA_INST_59
-+0x4AB0 US_ALU_ALPHA_INST_60
-+0x4AB4 US_ALU_ALPHA_INST_61
-+0x4AB8 US_ALU_ALPHA_INST_62
-+0x4ABC US_ALU_ALPHA_INST_63
-+0x4BC0 FG_FOG_BLEND
-+0x4BC4 FG_FOG_FACTOR
-+0x4BC8 FG_FOG_COLOR_R
-+0x4BCC FG_FOG_COLOR_G
-+0x4BD0 FG_FOG_COLOR_B
-+0x4BD4 FG_ALPHA_FUNC
-+0x4BD8 FG_DEPTH_SRC
-+0x4C00 US_ALU_CONST_R_0
-+0x4C04 US_ALU_CONST_G_0
-+0x4C08 US_ALU_CONST_B_0
-+0x4C0C US_ALU_CONST_A_0
-+0x4C10 US_ALU_CONST_R_1
-+0x4C14 US_ALU_CONST_G_1
-+0x4C18 US_ALU_CONST_B_1
-+0x4C1C US_ALU_CONST_A_1
-+0x4C20 US_ALU_CONST_R_2
-+0x4C24 US_ALU_CONST_G_2
-+0x4C28 US_ALU_CONST_B_2
-+0x4C2C US_ALU_CONST_A_2
-+0x4C30 US_ALU_CONST_R_3
-+0x4C34 US_ALU_CONST_G_3
-+0x4C38 US_ALU_CONST_B_3
-+0x4C3C US_ALU_CONST_A_3
-+0x4C40 US_ALU_CONST_R_4
-+0x4C44 US_ALU_CONST_G_4
-+0x4C48 US_ALU_CONST_B_4
-+0x4C4C US_ALU_CONST_A_4
-+0x4C50 US_ALU_CONST_R_5
-+0x4C54 US_ALU_CONST_G_5
-+0x4C58 US_ALU_CONST_B_5
-+0x4C5C US_ALU_CONST_A_5
-+0x4C60 US_ALU_CONST_R_6
-+0x4C64 US_ALU_CONST_G_6
-+0x4C68 US_ALU_CONST_B_6
-+0x4C6C US_ALU_CONST_A_6
-+0x4C70 US_ALU_CONST_R_7
-+0x4C74 US_ALU_CONST_G_7
-+0x4C78 US_ALU_CONST_B_7
-+0x4C7C US_ALU_CONST_A_7
-+0x4C80 US_ALU_CONST_R_8
-+0x4C84 US_ALU_CONST_G_8
-+0x4C88 US_ALU_CONST_B_8
-+0x4C8C US_ALU_CONST_A_8
-+0x4C90 US_ALU_CONST_R_9
-+0x4C94 US_ALU_CONST_G_9
-+0x4C98 US_ALU_CONST_B_9
-+0x4C9C US_ALU_CONST_A_9
-+0x4CA0 US_ALU_CONST_R_10
-+0x4CA4 US_ALU_CONST_G_10
-+0x4CA8 US_ALU_CONST_B_10
-+0x4CAC US_ALU_CONST_A_10
-+0x4CB0 US_ALU_CONST_R_11
-+0x4CB4 US_ALU_CONST_G_11
-+0x4CB8 US_ALU_CONST_B_11
-+0x4CBC US_ALU_CONST_A_11
-+0x4CC0 US_ALU_CONST_R_12
-+0x4CC4 US_ALU_CONST_G_12
-+0x4CC8 US_ALU_CONST_B_12
-+0x4CCC US_ALU_CONST_A_12
-+0x4CD0 US_ALU_CONST_R_13
-+0x4CD4 US_ALU_CONST_G_13
-+0x4CD8 US_ALU_CONST_B_13
-+0x4CDC US_ALU_CONST_A_13
-+0x4CE0 US_ALU_CONST_R_14
-+0x4CE4 US_ALU_CONST_G_14
-+0x4CE8 US_ALU_CONST_B_14
-+0x4CEC US_ALU_CONST_A_14
-+0x4CF0 US_ALU_CONST_R_15
-+0x4CF4 US_ALU_CONST_G_15
-+0x4CF8 US_ALU_CONST_B_15
-+0x4CFC US_ALU_CONST_A_15
-+0x4D00 US_ALU_CONST_R_16
-+0x4D04 US_ALU_CONST_G_16
-+0x4D08 US_ALU_CONST_B_16
-+0x4D0C US_ALU_CONST_A_16
-+0x4D10 US_ALU_CONST_R_17
-+0x4D14 US_ALU_CONST_G_17
-+0x4D18 US_ALU_CONST_B_17
-+0x4D1C US_ALU_CONST_A_17
-+0x4D20 US_ALU_CONST_R_18
-+0x4D24 US_ALU_CONST_G_18
-+0x4D28 US_ALU_CONST_B_18
-+0x4D2C US_ALU_CONST_A_18
-+0x4D30 US_ALU_CONST_R_19
-+0x4D34 US_ALU_CONST_G_19
-+0x4D38 US_ALU_CONST_B_19
-+0x4D3C US_ALU_CONST_A_19
-+0x4D40 US_ALU_CONST_R_20
-+0x4D44 US_ALU_CONST_G_20
-+0x4D48 US_ALU_CONST_B_20
-+0x4D4C US_ALU_CONST_A_20
-+0x4D50 US_ALU_CONST_R_21
-+0x4D54 US_ALU_CONST_G_21
-+0x4D58 US_ALU_CONST_B_21
-+0x4D5C US_ALU_CONST_A_21
-+0x4D60 US_ALU_CONST_R_22
-+0x4D64 US_ALU_CONST_G_22
-+0x4D68 US_ALU_CONST_B_22
-+0x4D6C US_ALU_CONST_A_22
-+0x4D70 US_ALU_CONST_R_23
-+0x4D74 US_ALU_CONST_G_23
-+0x4D78 US_ALU_CONST_B_23
-+0x4D7C US_ALU_CONST_A_23
-+0x4D80 US_ALU_CONST_R_24
-+0x4D84 US_ALU_CONST_G_24
-+0x4D88 US_ALU_CONST_B_24
-+0x4D8C US_ALU_CONST_A_24
-+0x4D90 US_ALU_CONST_R_25
-+0x4D94 US_ALU_CONST_G_25
-+0x4D98 US_ALU_CONST_B_25
-+0x4D9C US_ALU_CONST_A_25
-+0x4DA0 US_ALU_CONST_R_26
-+0x4DA4 US_ALU_CONST_G_26
-+0x4DA8 US_ALU_CONST_B_26
-+0x4DAC US_ALU_CONST_A_26
-+0x4DB0 US_ALU_CONST_R_27
-+0x4DB4 US_ALU_CONST_G_27
-+0x4DB8 US_ALU_CONST_B_27
-+0x4DBC US_ALU_CONST_A_27
-+0x4DC0 US_ALU_CONST_R_28
-+0x4DC4 US_ALU_CONST_G_28
-+0x4DC8 US_ALU_CONST_B_28
-+0x4DCC US_ALU_CONST_A_28
-+0x4DD0 US_ALU_CONST_R_29
-+0x4DD4 US_ALU_CONST_G_29
-+0x4DD8 US_ALU_CONST_B_29
-+0x4DDC US_ALU_CONST_A_29
-+0x4DE0 US_ALU_CONST_R_30
-+0x4DE4 US_ALU_CONST_G_30
-+0x4DE8 US_ALU_CONST_B_30
-+0x4DEC US_ALU_CONST_A_30
-+0x4DF0 US_ALU_CONST_R_31
-+0x4DF4 US_ALU_CONST_G_31
-+0x4DF8 US_ALU_CONST_B_31
-+0x4DFC US_ALU_CONST_A_31
-+0x4E04 RB3D_BLENDCNTL_R3
-+0x4E08 RB3D_ABLENDCNTL_R3
-+0x4E0C RB3D_COLOR_CHANNEL_MASK
-+0x4E10 RB3D_CONSTANT_COLOR
-+0x4E14 RB3D_COLOR_CLEAR_VALUE
-+0x4E18 RB3D_ROPCNTL_R3
-+0x4E1C RB3D_CLRCMP_FLIPE_R3
-+0x4E20 RB3D_CLRCMP_CLR_R3
-+0x4E24 RB3D_CLRCMP_MSK_R3
-+0x4E48 RB3D_DEBUG_CTL
-+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
-+0x4E50 RB3D_DITHER_CTL
-+0x4E54 RB3D_CMASK_OFFSET0
-+0x4E58 RB3D_CMASK_OFFSET1
-+0x4E5C RB3D_CMASK_OFFSET2
-+0x4E60 RB3D_CMASK_OFFSET3
-+0x4E64 RB3D_CMASK_PITCH0
-+0x4E68 RB3D_CMASK_PITCH1
-+0x4E6C RB3D_CMASK_PITCH2
-+0x4E70 RB3D_CMASK_PITCH3
-+0x4E74 RB3D_CMASK_WRINDEX
-+0x4E78 RB3D_CMASK_DWORD
-+0x4E7C RB3D_CMASK_RDINDEX
-+0x4E80 RB3D_AARESOLVE_OFFSET
-+0x4E84 RB3D_AARESOLVE_PITCH
-+0x4E88 RB3D_AARESOLVE_CTL
-+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
-+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
-+0x4F04 ZB_ZSTENCILCNTL
-+0x4F08 ZB_STENCILREFMASK
-+0x4F14 ZB_ZTOP
-+0x4F18 ZB_ZCACHE_CTLSTAT
-+0x4F1C ZB_BW_CNTL
-+0x4F28 ZB_DEPTHCLEARVALUE
-+0x4F30 ZB_ZMASK_OFFSET
-+0x4F34 ZB_ZMASK_PITCH
-+0x4F38 ZB_ZMASK_WRINDEX
-+0x4F3C ZB_ZMASK_DWORD
-+0x4F40 ZB_ZMASK_RDINDEX
-+0x4F44 ZB_HIZ_OFFSET
-+0x4F48 ZB_HIZ_WRINDEX
-+0x4F4C ZB_HIZ_DWORD
-+0x4F50 ZB_HIZ_RDINDEX
-+0x4F54 ZB_HIZ_PITCH
-+0x4F58 ZB_ZPASS_DATA
-diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
-new file mode 100644
-index 0000000..0102a0d
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
-@@ -0,0 +1,486 @@
-+rv515 0x6d40
-+0x1434 SRC_Y_X
-+0x1438 DST_Y_X
-+0x143C DST_HEIGHT_WIDTH
-+0x146C DP_GUI_MASTER_CNTL
-+0x1474 BRUSH_Y_X
-+0x1478 DP_BRUSH_BKGD_CLR
-+0x147C DP_BRUSH_FRGD_CLR
-+0x1480 BRUSH_DATA0
-+0x1484 BRUSH_DATA1
-+0x1598 DST_WIDTH_HEIGHT
-+0x15C0 CLR_CMP_CNTL
-+0x15C4 CLR_CMP_CLR_SRC
-+0x15C8 CLR_CMP_CLR_DST
-+0x15CC CLR_CMP_MSK
-+0x15D8 DP_SRC_FRGD_CLR
-+0x15DC DP_SRC_BKGD_CLR
-+0x1600 DST_LINE_START
-+0x1604 DST_LINE_END
-+0x1608 DST_LINE_PATCOUNT
-+0x16C0 DP_CNTL
-+0x16CC DP_WRITE_MSK
-+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
-+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
-+0x16EC SC_TOP_LEFT
-+0x16F0 SC_BOTTOM_RIGHT
-+0x16F4 SRC_SC_BOTTOM_RIGHT
-+0x1714 DSTCACHE_CTLSTAT
-+0x1720 WAIT_UNTIL
-+0x172C RBBM_GUICNTL
-+0x1D98 VAP_VPORT_XSCALE
-+0x1D9C VAP_VPORT_XOFFSET
-+0x1DA0 VAP_VPORT_YSCALE
-+0x1DA4 VAP_VPORT_YOFFSET
-+0x1DA8 VAP_VPORT_ZSCALE
-+0x1DAC VAP_VPORT_ZOFFSET
-+0x2080 VAP_CNTL
-+0x2090 VAP_OUT_VTX_FMT_0
-+0x2094 VAP_OUT_VTX_FMT_1
-+0x20B0 VAP_VTE_CNTL
-+0x2138 VAP_VF_MIN_VTX_INDX
-+0x2140 VAP_CNTL_STATUS
-+0x2150 VAP_PROG_STREAM_CNTL_0
-+0x2154 VAP_PROG_STREAM_CNTL_1
-+0x2158 VAP_PROG_STREAM_CNTL_2
-+0x215C VAP_PROG_STREAM_CNTL_3
-+0x2160 VAP_PROG_STREAM_CNTL_4
-+0x2164 VAP_PROG_STREAM_CNTL_5
-+0x2168 VAP_PROG_STREAM_CNTL_6
-+0x216C VAP_PROG_STREAM_CNTL_7
-+0x2180 VAP_VTX_STATE_CNTL
-+0x2184 VAP_VSM_VTX_ASSM
-+0x2188 VAP_VTX_STATE_IND_REG_0
-+0x218C VAP_VTX_STATE_IND_REG_1
-+0x2190 VAP_VTX_STATE_IND_REG_2
-+0x2194 VAP_VTX_STATE_IND_REG_3
-+0x2198 VAP_VTX_STATE_IND_REG_4
-+0x219C VAP_VTX_STATE_IND_REG_5
-+0x21A0 VAP_VTX_STATE_IND_REG_6
-+0x21A4 VAP_VTX_STATE_IND_REG_7
-+0x21A8 VAP_VTX_STATE_IND_REG_8
-+0x21AC VAP_VTX_STATE_IND_REG_9
-+0x21B0 VAP_VTX_STATE_IND_REG_10
-+0x21B4 VAP_VTX_STATE_IND_REG_11
-+0x21B8 VAP_VTX_STATE_IND_REG_12
-+0x21BC VAP_VTX_STATE_IND_REG_13
-+0x21C0 VAP_VTX_STATE_IND_REG_14
-+0x21C4 VAP_VTX_STATE_IND_REG_15
-+0x21DC VAP_PSC_SGN_NORM_CNTL
-+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
-+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
-+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
-+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
-+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
-+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
-+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
-+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
-+0x2200 VAP_PVS_VECTOR_INDX_REG
-+0x2204 VAP_PVS_VECTOR_DATA_REG
-+0x2208 VAP_PVS_VECTOR_DATA_REG_128
-+0x2218 VAP_TEX_TO_COLOR_CNTL
-+0x221C VAP_CLIP_CNTL
-+0x2220 VAP_GB_VERT_CLIP_ADJ
-+0x2224 VAP_GB_VERT_DISC_ADJ
-+0x2228 VAP_GB_HORZ_CLIP_ADJ
-+0x222C VAP_GB_HORZ_DISC_ADJ
-+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
-+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
-+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
-+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
-+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
-+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
-+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
-+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
-+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
-+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
-+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
-+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
-+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
-+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
-+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
-+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
-+0x2284 VAP_PVS_STATE_FLUSH_REG
-+0x2288 VAP_PVS_VTX_TIMEOUT_REG
-+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
-+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
-+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
-+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
-+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
-+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
-+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
-+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
-+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
-+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
-+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
-+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
-+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
-+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
-+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
-+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
-+0x22D0 VAP_PVS_CODE_CNTL_0
-+0x22D4 VAP_PVS_CONST_CNTL
-+0x22D8 VAP_PVS_CODE_CNTL_1
-+0x22DC VAP_PVS_FLOW_CNTL_OPC
-+0x2500 VAP_PVS_FLOW_CNTL_ADDRS_LW_0
-+0x2504 VAP_PVS_FLOW_CNTL_ADDRS_UW_0
-+0x2508 VAP_PVS_FLOW_CNTL_ADDRS_LW_1
-+0x250C VAP_PVS_FLOW_CNTL_ADDRS_UW_1
-+0x2510 VAP_PVS_FLOW_CNTL_ADDRS_LW_2
-+0x2514 VAP_PVS_FLOW_CNTL_ADDRS_UW_2
-+0x2518 VAP_PVS_FLOW_CNTL_ADDRS_LW_3
-+0x251C VAP_PVS_FLOW_CNTL_ADDRS_UW_3
-+0x2520 VAP_PVS_FLOW_CNTL_ADDRS_LW_4
-+0x2524 VAP_PVS_FLOW_CNTL_ADDRS_UW_4
-+0x2528 VAP_PVS_FLOW_CNTL_ADDRS_LW_5
-+0x252C VAP_PVS_FLOW_CNTL_ADDRS_UW_5
-+0x2530 VAP_PVS_FLOW_CNTL_ADDRS_LW_6
-+0x2534 VAP_PVS_FLOW_CNTL_ADDRS_UW_6
-+0x2538 VAP_PVS_FLOW_CNTL_ADDRS_LW_7
-+0x253C VAP_PVS_FLOW_CNTL_ADDRS_UW_7
-+0x2540 VAP_PVS_FLOW_CNTL_ADDRS_LW_8
-+0x2544 VAP_PVS_FLOW_CNTL_ADDRS_UW_8
-+0x2548 VAP_PVS_FLOW_CNTL_ADDRS_LW_9
-+0x254C VAP_PVS_FLOW_CNTL_ADDRS_UW_9
-+0x2550 VAP_PVS_FLOW_CNTL_ADDRS_LW_10
-+0x2554 VAP_PVS_FLOW_CNTL_ADDRS_UW_10
-+0x2558 VAP_PVS_FLOW_CNTL_ADDRS_LW_11
-+0x255C VAP_PVS_FLOW_CNTL_ADDRS_UW_11
-+0x2560 VAP_PVS_FLOW_CNTL_ADDRS_LW_12
-+0x2564 VAP_PVS_FLOW_CNTL_ADDRS_UW_12
-+0x2568 VAP_PVS_FLOW_CNTL_ADDRS_LW_13
-+0x256C VAP_PVS_FLOW_CNTL_ADDRS_UW_13
-+0x2570 VAP_PVS_FLOW_CNTL_ADDRS_LW_14
-+0x2574 VAP_PVS_FLOW_CNTL_ADDRS_UW_14
-+0x2578 VAP_PVS_FLOW_CNTL_ADDRS_LW_15
-+0x257C VAP_PVS_FLOW_CNTL_ADDRS_UW_15
-+0x342C RB2D_DSTCACHE_CTLSTAT
-+0x4000 GB_VAP_RASTER_VTX_FMT_0
-+0x4004 GB_VAP_RASTER_VTX_FMT_1
-+0x4008 GB_ENABLE
-+0x401C GB_SELECT
-+0x4020 GB_AA_CONFIG
-+0x4024 GB_FIFO_SIZE
-+0x4100 TX_INVALTAGS
-+0x4200 GA_POINT_S0
-+0x4204 GA_POINT_T0
-+0x4208 GA_POINT_S1
-+0x420C GA_POINT_T1
-+0x4214 GA_TRIANGLE_STIPPLE
-+0x421C GA_POINT_SIZE
-+0x4230 GA_POINT_MINMAX
-+0x4234 GA_LINE_CNTL
-+0x4238 GA_LINE_STIPPLE_CONFIG
-+0x4260 GA_LINE_STIPPLE_VALUE
-+0x4264 GA_LINE_S0
-+0x4268 GA_LINE_S1
-+0x4278 GA_COLOR_CONTROL
-+0x427C GA_SOLID_RG
-+0x4280 GA_SOLID_BA
-+0x4288 GA_POLY_MODE
-+0x428C GA_ROUND_MODE
-+0x4290 GA_OFFSET
-+0x4294 GA_FOG_SCALE
-+0x4298 GA_FOG_OFFSET
-+0x42A0 SU_TEX_WRAP
-+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
-+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
-+0x42AC SU_POLY_OFFSET_BACK_SCALE
-+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
-+0x42B4 SU_POLY_OFFSET_ENABLE
-+0x42B8 SU_CULL_MODE
-+0x42C0 SU_DEPTH_SCALE
-+0x42C4 SU_DEPTH_OFFSET
-+0x42C8 SU_REG_DEST
-+0x4300 RS_COUNT
-+0x4304 RS_INST_COUNT
-+0x4074 RS_IP_0
-+0x4078 RS_IP_1
-+0x407C RS_IP_2
-+0x4080 RS_IP_3
-+0x4084 RS_IP_4
-+0x4088 RS_IP_5
-+0x408C RS_IP_6
-+0x4090 RS_IP_7
-+0x4094 RS_IP_8
-+0x4098 RS_IP_9
-+0x409C RS_IP_10
-+0x40A0 RS_IP_11
-+0x40A4 RS_IP_12
-+0x40A8 RS_IP_13
-+0x40AC RS_IP_14
-+0x40B0 RS_IP_15
-+0x4320 RS_INST_0
-+0x4324 RS_INST_1
-+0x4328 RS_INST_2
-+0x432C RS_INST_3
-+0x4330 RS_INST_4
-+0x4334 RS_INST_5
-+0x4338 RS_INST_6
-+0x433C RS_INST_7
-+0x4340 RS_INST_8
-+0x4344 RS_INST_9
-+0x4348 RS_INST_10
-+0x434C RS_INST_11
-+0x4350 RS_INST_12
-+0x4354 RS_INST_13
-+0x4358 RS_INST_14
-+0x435C RS_INST_15
-+0x43A4 SC_HYPERZ_EN
-+0x43A8 SC_EDGERULE
-+0x43B0 SC_CLIP_0_A
-+0x43B4 SC_CLIP_0_B
-+0x43B8 SC_CLIP_1_A
-+0x43BC SC_CLIP_1_B
-+0x43C0 SC_CLIP_2_A
-+0x43C4 SC_CLIP_2_B
-+0x43C8 SC_CLIP_3_A
-+0x43CC SC_CLIP_3_B
-+0x43D0 SC_CLIP_RULE
-+0x43E0 SC_SCISSOR0
-+0x43E8 SC_SCREENDOOR
-+0x4440 TX_FILTER1_0
-+0x4444 TX_FILTER1_1
-+0x4448 TX_FILTER1_2
-+0x444C TX_FILTER1_3
-+0x4450 TX_FILTER1_4
-+0x4454 TX_FILTER1_5
-+0x4458 TX_FILTER1_6
-+0x445C TX_FILTER1_7
-+0x4460 TX_FILTER1_8
-+0x4464 TX_FILTER1_9
-+0x4468 TX_FILTER1_10
-+0x446C TX_FILTER1_11
-+0x4470 TX_FILTER1_12
-+0x4474 TX_FILTER1_13
-+0x4478 TX_FILTER1_14
-+0x447C TX_FILTER1_15
-+0x4580 TX_CHROMA_KEY_0
-+0x4584 TX_CHROMA_KEY_1
-+0x4588 TX_CHROMA_KEY_2
-+0x458C TX_CHROMA_KEY_3
-+0x4590 TX_CHROMA_KEY_4
-+0x4594 TX_CHROMA_KEY_5
-+0x4598 TX_CHROMA_KEY_6
-+0x459C TX_CHROMA_KEY_7
-+0x45A0 TX_CHROMA_KEY_8
-+0x45A4 TX_CHROMA_KEY_9
-+0x45A8 TX_CHROMA_KEY_10
-+0x45AC TX_CHROMA_KEY_11
-+0x45B0 TX_CHROMA_KEY_12
-+0x45B4 TX_CHROMA_KEY_13
-+0x45B8 TX_CHROMA_KEY_14
-+0x45BC TX_CHROMA_KEY_15
-+0x45C0 TX_BORDER_COLOR_0
-+0x45C4 TX_BORDER_COLOR_1
-+0x45C8 TX_BORDER_COLOR_2
-+0x45CC TX_BORDER_COLOR_3
-+0x45D0 TX_BORDER_COLOR_4
-+0x45D4 TX_BORDER_COLOR_5
-+0x45D8 TX_BORDER_COLOR_6
-+0x45DC TX_BORDER_COLOR_7
-+0x45E0 TX_BORDER_COLOR_8
-+0x45E4 TX_BORDER_COLOR_9
-+0x45E8 TX_BORDER_COLOR_10
-+0x45EC TX_BORDER_COLOR_11
-+0x45F0 TX_BORDER_COLOR_12
-+0x45F4 TX_BORDER_COLOR_13
-+0x45F8 TX_BORDER_COLOR_14
-+0x45FC TX_BORDER_COLOR_15
-+0x4250 GA_US_VECTOR_INDEX
-+0x4254 GA_US_VECTOR_DATA
-+0x4600 US_CONFIG
-+0x4604 US_PIXSIZE
-+0x4620 US_FC_BOOL_CONST
-+0x4624 US_FC_CTRL
-+0x4630 US_CODE_ADDR
-+0x4634 US_CODE_RANGE
-+0x4638 US_CODE_OFFSET
-+0x46A4 US_OUT_FMT_0
-+0x46A8 US_OUT_FMT_1
-+0x46AC US_OUT_FMT_2
-+0x46B0 US_OUT_FMT_3
-+0x46B4 US_W_FMT
-+0x4BC0 FG_FOG_BLEND
-+0x4BC4 FG_FOG_FACTOR
-+0x4BC8 FG_FOG_COLOR_R
-+0x4BCC FG_FOG_COLOR_G
-+0x4BD0 FG_FOG_COLOR_B
-+0x4BD4 FG_ALPHA_FUNC
-+0x4BD8 FG_DEPTH_SRC
-+0x4C00 US_ALU_CONST_R_0
-+0x4C04 US_ALU_CONST_G_0
-+0x4C08 US_ALU_CONST_B_0
-+0x4C0C US_ALU_CONST_A_0
-+0x4C10 US_ALU_CONST_R_1
-+0x4C14 US_ALU_CONST_G_1
-+0x4C18 US_ALU_CONST_B_1
-+0x4C1C US_ALU_CONST_A_1
-+0x4C20 US_ALU_CONST_R_2
-+0x4C24 US_ALU_CONST_G_2
-+0x4C28 US_ALU_CONST_B_2
-+0x4C2C US_ALU_CONST_A_2
-+0x4C30 US_ALU_CONST_R_3
-+0x4C34 US_ALU_CONST_G_3
-+0x4C38 US_ALU_CONST_B_3
-+0x4C3C US_ALU_CONST_A_3
-+0x4C40 US_ALU_CONST_R_4
-+0x4C44 US_ALU_CONST_G_4
-+0x4C48 US_ALU_CONST_B_4
-+0x4C4C US_ALU_CONST_A_4
-+0x4C50 US_ALU_CONST_R_5
-+0x4C54 US_ALU_CONST_G_5
-+0x4C58 US_ALU_CONST_B_5
-+0x4C5C US_ALU_CONST_A_5
-+0x4C60 US_ALU_CONST_R_6
-+0x4C64 US_ALU_CONST_G_6
-+0x4C68 US_ALU_CONST_B_6
-+0x4C6C US_ALU_CONST_A_6
-+0x4C70 US_ALU_CONST_R_7
-+0x4C74 US_ALU_CONST_G_7
-+0x4C78 US_ALU_CONST_B_7
-+0x4C7C US_ALU_CONST_A_7
-+0x4C80 US_ALU_CONST_R_8
-+0x4C84 US_ALU_CONST_G_8
-+0x4C88 US_ALU_CONST_B_8
-+0x4C8C US_ALU_CONST_A_8
-+0x4C90 US_ALU_CONST_R_9
-+0x4C94 US_ALU_CONST_G_9
-+0x4C98 US_ALU_CONST_B_9
-+0x4C9C US_ALU_CONST_A_9
-+0x4CA0 US_ALU_CONST_R_10
-+0x4CA4 US_ALU_CONST_G_10
-+0x4CA8 US_ALU_CONST_B_10
-+0x4CAC US_ALU_CONST_A_10
-+0x4CB0 US_ALU_CONST_R_11
-+0x4CB4 US_ALU_CONST_G_11
-+0x4CB8 US_ALU_CONST_B_11
-+0x4CBC US_ALU_CONST_A_11
-+0x4CC0 US_ALU_CONST_R_12
-+0x4CC4 US_ALU_CONST_G_12
-+0x4CC8 US_ALU_CONST_B_12
-+0x4CCC US_ALU_CONST_A_12
-+0x4CD0 US_ALU_CONST_R_13
-+0x4CD4 US_ALU_CONST_G_13
-+0x4CD8 US_ALU_CONST_B_13
-+0x4CDC US_ALU_CONST_A_13
-+0x4CE0 US_ALU_CONST_R_14
-+0x4CE4 US_ALU_CONST_G_14
-+0x4CE8 US_ALU_CONST_B_14
-+0x4CEC US_ALU_CONST_A_14
-+0x4CF0 US_ALU_CONST_R_15
-+0x4CF4 US_ALU_CONST_G_15
-+0x4CF8 US_ALU_CONST_B_15
-+0x4CFC US_ALU_CONST_A_15
-+0x4D00 US_ALU_CONST_R_16
-+0x4D04 US_ALU_CONST_G_16
-+0x4D08 US_ALU_CONST_B_16
-+0x4D0C US_ALU_CONST_A_16
-+0x4D10 US_ALU_CONST_R_17
-+0x4D14 US_ALU_CONST_G_17
-+0x4D18 US_ALU_CONST_B_17
-+0x4D1C US_ALU_CONST_A_17
-+0x4D20 US_ALU_CONST_R_18
-+0x4D24 US_ALU_CONST_G_18
-+0x4D28 US_ALU_CONST_B_18
-+0x4D2C US_ALU_CONST_A_18
-+0x4D30 US_ALU_CONST_R_19
-+0x4D34 US_ALU_CONST_G_19
-+0x4D38 US_ALU_CONST_B_19
-+0x4D3C US_ALU_CONST_A_19
-+0x4D40 US_ALU_CONST_R_20
-+0x4D44 US_ALU_CONST_G_20
-+0x4D48 US_ALU_CONST_B_20
-+0x4D4C US_ALU_CONST_A_20
-+0x4D50 US_ALU_CONST_R_21
-+0x4D54 US_ALU_CONST_G_21
-+0x4D58 US_ALU_CONST_B_21
-+0x4D5C US_ALU_CONST_A_21
-+0x4D60 US_ALU_CONST_R_22
-+0x4D64 US_ALU_CONST_G_22
-+0x4D68 US_ALU_CONST_B_22
-+0x4D6C US_ALU_CONST_A_22
-+0x4D70 US_ALU_CONST_R_23
-+0x4D74 US_ALU_CONST_G_23
-+0x4D78 US_ALU_CONST_B_23
-+0x4D7C US_ALU_CONST_A_23
-+0x4D80 US_ALU_CONST_R_24
-+0x4D84 US_ALU_CONST_G_24
-+0x4D88 US_ALU_CONST_B_24
-+0x4D8C US_ALU_CONST_A_24
-+0x4D90 US_ALU_CONST_R_25
-+0x4D94 US_ALU_CONST_G_25
-+0x4D98 US_ALU_CONST_B_25
-+0x4D9C US_ALU_CONST_A_25
-+0x4DA0 US_ALU_CONST_R_26
-+0x4DA4 US_ALU_CONST_G_26
-+0x4DA8 US_ALU_CONST_B_26
-+0x4DAC US_ALU_CONST_A_26
-+0x4DB0 US_ALU_CONST_R_27
-+0x4DB4 US_ALU_CONST_G_27
-+0x4DB8 US_ALU_CONST_B_27
-+0x4DBC US_ALU_CONST_A_27
-+0x4DC0 US_ALU_CONST_R_28
-+0x4DC4 US_ALU_CONST_G_28
-+0x4DC8 US_ALU_CONST_B_28
-+0x4DCC US_ALU_CONST_A_28
-+0x4DD0 US_ALU_CONST_R_29
-+0x4DD4 US_ALU_CONST_G_29
-+0x4DD8 US_ALU_CONST_B_29
-+0x4DDC US_ALU_CONST_A_29
-+0x4DE0 US_ALU_CONST_R_30
-+0x4DE4 US_ALU_CONST_G_30
-+0x4DE8 US_ALU_CONST_B_30
-+0x4DEC US_ALU_CONST_A_30
-+0x4DF0 US_ALU_CONST_R_31
-+0x4DF4 US_ALU_CONST_G_31
-+0x4DF8 US_ALU_CONST_B_31
-+0x4DFC US_ALU_CONST_A_31
-+0x4E04 RB3D_BLENDCNTL_R3
-+0x4E08 RB3D_ABLENDCNTL_R3
-+0x4E0C RB3D_COLOR_CHANNEL_MASK
-+0x4E10 RB3D_CONSTANT_COLOR
-+0x4E14 RB3D_COLOR_CLEAR_VALUE
-+0x4E18 RB3D_ROPCNTL_R3
-+0x4E1C RB3D_CLRCMP_FLIPE_R3
-+0x4E20 RB3D_CLRCMP_CLR_R3
-+0x4E24 RB3D_CLRCMP_MSK_R3
-+0x4E48 RB3D_DEBUG_CTL
-+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
-+0x4E50 RB3D_DITHER_CTL
-+0x4E54 RB3D_CMASK_OFFSET0
-+0x4E58 RB3D_CMASK_OFFSET1
-+0x4E5C RB3D_CMASK_OFFSET2
-+0x4E60 RB3D_CMASK_OFFSET3
-+0x4E64 RB3D_CMASK_PITCH0
-+0x4E68 RB3D_CMASK_PITCH1
-+0x4E6C RB3D_CMASK_PITCH2
-+0x4E70 RB3D_CMASK_PITCH3
-+0x4E74 RB3D_CMASK_WRINDEX
-+0x4E78 RB3D_CMASK_DWORD
-+0x4E7C RB3D_CMASK_RDINDEX
-+0x4E80 RB3D_AARESOLVE_OFFSET
-+0x4E84 RB3D_AARESOLVE_PITCH
-+0x4E88 RB3D_AARESOLVE_CTL
-+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
-+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
-+0x4EF8 RB3D_CONSTANT_COLOR_AR
-+0x4EFC RB3D_CONSTANT_COLOR_GB
-+0x4F04 ZB_ZSTENCILCNTL
-+0x4F08 ZB_STENCILREFMASK
-+0x4F14 ZB_ZTOP
-+0x4F18 ZB_ZCACHE_CTLSTAT
-+0x4F1C ZB_BW_CNTL
-+0x4F28 ZB_DEPTHCLEARVALUE
-+0x4F30 ZB_ZMASK_OFFSET
-+0x4F34 ZB_ZMASK_PITCH
-+0x4F38 ZB_ZMASK_WRINDEX
-+0x4F3C ZB_ZMASK_DWORD
-+0x4F40 ZB_ZMASK_RDINDEX
-+0x4F44 ZB_HIZ_OFFSET
-+0x4F48 ZB_HIZ_WRINDEX
-+0x4F4C ZB_HIZ_DWORD
-+0x4F50 ZB_HIZ_RDINDEX
-+0x4F54 ZB_HIZ_PITCH
-+0x4F58 ZB_ZPASS_DATA
-+0x4FD4 ZB_STENCILREFMASK_BF
-diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
-index b29affd..8c3ea7e 100644
---- a/drivers/gpu/drm/radeon/rs400.c
-+++ b/drivers/gpu/drm/radeon/rs400.c
-@@ -63,7 +63,7 @@ void rs400_gart_adjust_size(struct radeon_device *rdev)
- break;
- default:
- DRM_ERROR("Unable to use IGP GART size %uM\n",
-- rdev->mc.gtt_size >> 20);
-+ (unsigned)(rdev->mc.gtt_size >> 20));
- DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
- DRM_ERROR("Forcing to 32M GART size\n");
- rdev->mc.gtt_size = 32 * 1024 * 1024;
-diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
-index 02fd11a..1b8d62f 100644
---- a/drivers/gpu/drm/radeon/rs600.c
-+++ b/drivers/gpu/drm/radeon/rs600.c
-@@ -29,6 +29,8 @@
- #include "radeon_reg.h"
- #include "radeon.h"
-
-+#include "rs600_reg_safe.h"
-+
- /* rs600 depends on : */
- void r100_hdp_reset(struct radeon_device *rdev);
- int r100_gui_wait_for_idle(struct radeon_device *rdev);
-@@ -410,64 +412,6 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
- WREG32(RS600_MC_DATA, v);
- }
-
--static const unsigned rs600_reg_safe_bm[219] = {
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
-- 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
-- 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
-- 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
-- 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
-- 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
-- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-- 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
-- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-- 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
--};
--
- int rs600_init(struct radeon_device *rdev)
- {
- rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
-diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
-index 8798825..839595b 100644
---- a/drivers/gpu/drm/radeon/rs690.c
-+++ b/drivers/gpu/drm/radeon/rs690.c
-@@ -652,4 +652,3 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
- WREG32(RS690_MC_DATA, v);
- WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
- }
--
-diff --git a/drivers/gpu/drm/radeon/rs780.c b/drivers/gpu/drm/radeon/rs780.c
-deleted file mode 100644
-index 0affcff..0000000
---- a/drivers/gpu/drm/radeon/rs780.c
-+++ /dev/null
-@@ -1,102 +0,0 @@
--/*
-- * Copyright 2008 Advanced Micro Devices, Inc.
-- * Copyright 2008 Red Hat Inc.
-- * Copyright 2009 Jerome Glisse.
-- *
-- * Permission is hereby granted, free of charge, to any person obtaining a
-- * copy of this software and associated documentation files (the "Software"),
-- * to deal in the Software without restriction, including without limitation
-- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- * and/or sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following conditions:
-- *
-- * The above copyright notice and this permission notice shall be included in
-- * all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
-- *
-- * Authors: Dave Airlie
-- * Alex Deucher
-- * Jerome Glisse
-- */
--#include "drmP.h"
--#include "radeon_reg.h"
--#include "radeon.h"
--
--/* rs780 depends on : */
--void rs600_mc_disable_clients(struct radeon_device *rdev);
--
--/* This files gather functions specifics to:
-- * rs780
-- *
-- * Some of these functions might be used by newer ASICs.
-- */
--int rs780_mc_wait_for_idle(struct radeon_device *rdev);
--void rs780_gpu_init(struct radeon_device *rdev);
--
--
--/*
-- * MC
-- */
--int rs780_mc_init(struct radeon_device *rdev)
--{
-- rs780_gpu_init(rdev);
-- /* FIXME: implement */
--
-- rs600_mc_disable_clients(rdev);
-- if (rs780_mc_wait_for_idle(rdev)) {
-- printk(KERN_WARNING "Failed to wait MC idle while "
-- "programming pipes. Bad things might happen.\n");
-- }
-- return 0;
--}
--
--void rs780_mc_fini(struct radeon_device *rdev)
--{
-- /* FIXME: implement */
--}
--
--
--/*
-- * Global GPU functions
-- */
--void rs780_errata(struct radeon_device *rdev)
--{
-- rdev->pll_errata = 0;
--}
--
--int rs780_mc_wait_for_idle(struct radeon_device *rdev)
--{
-- /* FIXME: implement */
-- return 0;
--}
--
--void rs780_gpu_init(struct radeon_device *rdev)
--{
-- /* FIXME: implement */
--}
--
--
--/*
-- * VRAM info
-- */
--void rs780_vram_get_type(struct radeon_device *rdev)
--{
-- /* FIXME: implement */
--}
--
--void rs780_vram_info(struct radeon_device *rdev)
--{
-- rs780_vram_get_type(rdev);
--
-- /* FIXME: implement */
-- /* Could aper size report 0 ? */
-- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
--}
-diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
-index 0566fb6..99e397f 100644
---- a/drivers/gpu/drm/radeon/rv515.c
-+++ b/drivers/gpu/drm/radeon/rv515.c
-@@ -27,10 +27,11 @@
- */
- #include <linux/seq_file.h>
- #include "drmP.h"
--#include "rv515r.h"
-+#include "rv515d.h"
- #include "radeon.h"
- #include "radeon_share.h"
-
-+#include "rv515_reg_safe.h"
- /* rv515 depends on : */
- void r100_hdp_reset(struct radeon_device *rdev);
- int r100_cp_reset(struct radeon_device *rdev);
-@@ -464,301 +465,244 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
- #endif
- }
-
--
- /*
- * Asic initialization
- */
--static const unsigned r500_reg_safe_bm[219] = {
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
-- 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
-- 0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
-- 0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
-- 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
-- 0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
-- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-- 0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-- 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
--};
--
- int rv515_init(struct radeon_device *rdev)
- {
-- rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
-- rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
-+ rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
-+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
- return 0;
- }
-
--void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
-+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
- {
--
-- WREG32(0x659C, 0x0);
-- WREG32(0x6594, 0x705);
-- WREG32(0x65A4, 0x10001);
-- WREG32(0x65D8, 0x0);
-- WREG32(0x65B0, 0x0);
-- WREG32(0x65C0, 0x0);
-- WREG32(0x65D4, 0x0);
-- WREG32(0x6578, 0x0);
-- WREG32(0x657C, 0x841880A8);
-- WREG32(0x6578, 0x1);
-- WREG32(0x657C, 0x84208680);
-- WREG32(0x6578, 0x2);
-- WREG32(0x657C, 0xBFF880B0);
-- WREG32(0x6578, 0x100);
-- WREG32(0x657C, 0x83D88088);
-- WREG32(0x6578, 0x101);
-- WREG32(0x657C, 0x84608680);
-- WREG32(0x6578, 0x102);
-- WREG32(0x657C, 0xBFF080D0);
-- WREG32(0x6578, 0x200);
-- WREG32(0x657C, 0x83988068);
-- WREG32(0x6578, 0x201);
-- WREG32(0x657C, 0x84A08680);
-- WREG32(0x6578, 0x202);
-- WREG32(0x657C, 0xBFF080F8);
-- WREG32(0x6578, 0x300);
-- WREG32(0x657C, 0x83588058);
-- WREG32(0x6578, 0x301);
-- WREG32(0x657C, 0x84E08660);
-- WREG32(0x6578, 0x302);
-- WREG32(0x657C, 0xBFF88120);
-- WREG32(0x6578, 0x400);
-- WREG32(0x657C, 0x83188040);
-- WREG32(0x6578, 0x401);
-- WREG32(0x657C, 0x85008660);
-- WREG32(0x6578, 0x402);
-- WREG32(0x657C, 0xBFF88150);
-- WREG32(0x6578, 0x500);
-- WREG32(0x657C, 0x82D88030);
-- WREG32(0x6578, 0x501);
-- WREG32(0x657C, 0x85408640);
-- WREG32(0x6578, 0x502);
-- WREG32(0x657C, 0xBFF88180);
-- WREG32(0x6578, 0x600);
-- WREG32(0x657C, 0x82A08018);
-- WREG32(0x6578, 0x601);
-- WREG32(0x657C, 0x85808620);
-- WREG32(0x6578, 0x602);
-- WREG32(0x657C, 0xBFF081B8);
-- WREG32(0x6578, 0x700);
-- WREG32(0x657C, 0x82608010);
-- WREG32(0x6578, 0x701);
-- WREG32(0x657C, 0x85A08600);
-- WREG32(0x6578, 0x702);
-- WREG32(0x657C, 0x800081F0);
-- WREG32(0x6578, 0x800);
-- WREG32(0x657C, 0x8228BFF8);
-- WREG32(0x6578, 0x801);
-- WREG32(0x657C, 0x85E085E0);
-- WREG32(0x6578, 0x802);
-- WREG32(0x657C, 0xBFF88228);
-- WREG32(0x6578, 0x10000);
-- WREG32(0x657C, 0x82A8BF00);
-- WREG32(0x6578, 0x10001);
-- WREG32(0x657C, 0x82A08CC0);
-- WREG32(0x6578, 0x10002);
-- WREG32(0x657C, 0x8008BEF8);
-- WREG32(0x6578, 0x10100);
-- WREG32(0x657C, 0x81F0BF28);
-- WREG32(0x6578, 0x10101);
-- WREG32(0x657C, 0x83608CA0);
-- WREG32(0x6578, 0x10102);
-- WREG32(0x657C, 0x8018BED0);
-- WREG32(0x6578, 0x10200);
-- WREG32(0x657C, 0x8148BF38);
-- WREG32(0x6578, 0x10201);
-- WREG32(0x657C, 0x84408C80);
-- WREG32(0x6578, 0x10202);
-- WREG32(0x657C, 0x8008BEB8);
-- WREG32(0x6578, 0x10300);
-- WREG32(0x657C, 0x80B0BF78);
-- WREG32(0x6578, 0x10301);
-- WREG32(0x657C, 0x85008C20);
-- WREG32(0x6578, 0x10302);
-- WREG32(0x657C, 0x8020BEA0);
-- WREG32(0x6578, 0x10400);
-- WREG32(0x657C, 0x8028BF90);
-- WREG32(0x6578, 0x10401);
-- WREG32(0x657C, 0x85E08BC0);
-- WREG32(0x6578, 0x10402);
-- WREG32(0x657C, 0x8018BE90);
-- WREG32(0x6578, 0x10500);
-- WREG32(0x657C, 0xBFB8BFB0);
-- WREG32(0x6578, 0x10501);
-- WREG32(0x657C, 0x86C08B40);
-- WREG32(0x6578, 0x10502);
-- WREG32(0x657C, 0x8010BE90);
-- WREG32(0x6578, 0x10600);
-- WREG32(0x657C, 0xBF58BFC8);
-- WREG32(0x6578, 0x10601);
-- WREG32(0x657C, 0x87A08AA0);
-- WREG32(0x6578, 0x10602);
-- WREG32(0x657C, 0x8010BE98);
-- WREG32(0x6578, 0x10700);
-- WREG32(0x657C, 0xBF10BFF0);
-- WREG32(0x6578, 0x10701);
-- WREG32(0x657C, 0x886089E0);
-- WREG32(0x6578, 0x10702);
-- WREG32(0x657C, 0x8018BEB0);
-- WREG32(0x6578, 0x10800);
-- WREG32(0x657C, 0xBED8BFE8);
-- WREG32(0x6578, 0x10801);
-- WREG32(0x657C, 0x89408940);
-- WREG32(0x6578, 0x10802);
-- WREG32(0x657C, 0xBFE8BED8);
-- WREG32(0x6578, 0x20000);
-- WREG32(0x657C, 0x80008000);
-- WREG32(0x6578, 0x20001);
-- WREG32(0x657C, 0x90008000);
-- WREG32(0x6578, 0x20002);
-- WREG32(0x657C, 0x80008000);
-- WREG32(0x6578, 0x20003);
-- WREG32(0x657C, 0x80008000);
-- WREG32(0x6578, 0x20100);
-- WREG32(0x657C, 0x80108000);
-- WREG32(0x6578, 0x20101);
-- WREG32(0x657C, 0x8FE0BF70);
-- WREG32(0x6578, 0x20102);
-- WREG32(0x657C, 0xBFE880C0);
-- WREG32(0x6578, 0x20103);
-- WREG32(0x657C, 0x80008000);
-- WREG32(0x6578, 0x20200);
-- WREG32(0x657C, 0x8018BFF8);
-- WREG32(0x6578, 0x20201);
-- WREG32(0x657C, 0x8F80BF08);
-- WREG32(0x6578, 0x20202);
-- WREG32(0x657C, 0xBFD081A0);
-- WREG32(0x6578, 0x20203);
-- WREG32(0x657C, 0xBFF88000);
-- WREG32(0x6578, 0x20300);
-- WREG32(0x657C, 0x80188000);
-- WREG32(0x6578, 0x20301);
-- WREG32(0x657C, 0x8EE0BEC0);
-- WREG32(0x6578, 0x20302);
-- WREG32(0x657C, 0xBFB082A0);
-- WREG32(0x6578, 0x20303);
-- WREG32(0x657C, 0x80008000);
-- WREG32(0x6578, 0x20400);
-- WREG32(0x657C, 0x80188000);
-- WREG32(0x6578, 0x20401);
-- WREG32(0x657C, 0x8E00BEA0);
-- WREG32(0x6578, 0x20402);
-- WREG32(0x657C, 0xBF8883C0);
-- WREG32(0x6578, 0x20403);
-- WREG32(0x657C, 0x80008000);
-- WREG32(0x6578, 0x20500);
-- WREG32(0x657C, 0x80188000);
-- WREG32(0x6578, 0x20501);
-- WREG32(0x657C, 0x8D00BE90);
-- WREG32(0x6578, 0x20502);
-- WREG32(0x657C, 0xBF588500);
-- WREG32(0x6578, 0x20503);
-- WREG32(0x657C, 0x80008008);
-- WREG32(0x6578, 0x20600);
-- WREG32(0x657C, 0x80188000);
-- WREG32(0x6578, 0x20601);
-- WREG32(0x657C, 0x8BC0BE98);
-- WREG32(0x6578, 0x20602);
-- WREG32(0x657C, 0xBF308660);
-- WREG32(0x6578, 0x20603);
-- WREG32(0x657C, 0x80008008);
-- WREG32(0x6578, 0x20700);
-- WREG32(0x657C, 0x80108000);
-- WREG32(0x6578, 0x20701);
-- WREG32(0x657C, 0x8A80BEB0);
-- WREG32(0x6578, 0x20702);
-- WREG32(0x657C, 0xBF0087C0);
-- WREG32(0x6578, 0x20703);
-- WREG32(0x657C, 0x80008008);
-- WREG32(0x6578, 0x20800);
-- WREG32(0x657C, 0x80108000);
-- WREG32(0x6578, 0x20801);
-- WREG32(0x657C, 0x8920BED0);
-- WREG32(0x6578, 0x20802);
-- WREG32(0x657C, 0xBED08920);
-- WREG32(0x6578, 0x20803);
-- WREG32(0x657C, 0x80008010);
-- WREG32(0x6578, 0x30000);
-- WREG32(0x657C, 0x90008000);
-- WREG32(0x6578, 0x30001);
-- WREG32(0x657C, 0x80008000);
-- WREG32(0x6578, 0x30100);
-- WREG32(0x657C, 0x8FE0BF90);
-- WREG32(0x6578, 0x30101);
-- WREG32(0x657C, 0xBFF880A0);
-- WREG32(0x6578, 0x30200);
-- WREG32(0x657C, 0x8F60BF40);
-- WREG32(0x6578, 0x30201);
-- WREG32(0x657C, 0xBFE88180);
-- WREG32(0x6578, 0x30300);
-- WREG32(0x657C, 0x8EC0BF00);
-- WREG32(0x6578, 0x30301);
-- WREG32(0x657C, 0xBFC88280);
-- WREG32(0x6578, 0x30400);
-- WREG32(0x657C, 0x8DE0BEE0);
-- WREG32(0x6578, 0x30401);
-- WREG32(0x657C, 0xBFA083A0);
-- WREG32(0x6578, 0x30500);
-- WREG32(0x657C, 0x8CE0BED0);
-- WREG32(0x6578, 0x30501);
-- WREG32(0x657C, 0xBF7884E0);
-- WREG32(0x6578, 0x30600);
-- WREG32(0x657C, 0x8BA0BED8);
-- WREG32(0x6578, 0x30601);
-- WREG32(0x657C, 0xBF508640);
-- WREG32(0x6578, 0x30700);
-- WREG32(0x657C, 0x8A60BEE8);
-- WREG32(0x6578, 0x30701);
-- WREG32(0x657C, 0xBF2087A0);
-- WREG32(0x6578, 0x30800);
-- WREG32(0x657C, 0x8900BF00);
-- WREG32(0x6578, 0x30801);
-- WREG32(0x657C, 0xBF008900);
-+ int index_reg = 0x6578 + crtc->crtc_offset;
-+ int data_reg = 0x657c + crtc->crtc_offset;
-+
-+ WREG32(0x659C + crtc->crtc_offset, 0x0);
-+ WREG32(0x6594 + crtc->crtc_offset, 0x705);
-+ WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
-+ WREG32(0x65D8 + crtc->crtc_offset, 0x0);
-+ WREG32(0x65B0 + crtc->crtc_offset, 0x0);
-+ WREG32(0x65C0 + crtc->crtc_offset, 0x0);
-+ WREG32(0x65D4 + crtc->crtc_offset, 0x0);
-+ WREG32(index_reg, 0x0);
-+ WREG32(data_reg, 0x841880A8);
-+ WREG32(index_reg, 0x1);
-+ WREG32(data_reg, 0x84208680);
-+ WREG32(index_reg, 0x2);
-+ WREG32(data_reg, 0xBFF880B0);
-+ WREG32(index_reg, 0x100);
-+ WREG32(data_reg, 0x83D88088);
-+ WREG32(index_reg, 0x101);
-+ WREG32(data_reg, 0x84608680);
-+ WREG32(index_reg, 0x102);
-+ WREG32(data_reg, 0xBFF080D0);
-+ WREG32(index_reg, 0x200);
-+ WREG32(data_reg, 0x83988068);
-+ WREG32(index_reg, 0x201);
-+ WREG32(data_reg, 0x84A08680);
-+ WREG32(index_reg, 0x202);
-+ WREG32(data_reg, 0xBFF080F8);
-+ WREG32(index_reg, 0x300);
-+ WREG32(data_reg, 0x83588058);
-+ WREG32(index_reg, 0x301);
-+ WREG32(data_reg, 0x84E08660);
-+ WREG32(index_reg, 0x302);
-+ WREG32(data_reg, 0xBFF88120);
-+ WREG32(index_reg, 0x400);
-+ WREG32(data_reg, 0x83188040);
-+ WREG32(index_reg, 0x401);
-+ WREG32(data_reg, 0x85008660);
-+ WREG32(index_reg, 0x402);
-+ WREG32(data_reg, 0xBFF88150);
-+ WREG32(index_reg, 0x500);
-+ WREG32(data_reg, 0x82D88030);
-+ WREG32(index_reg, 0x501);
-+ WREG32(data_reg, 0x85408640);
-+ WREG32(index_reg, 0x502);
-+ WREG32(data_reg, 0xBFF88180);
-+ WREG32(index_reg, 0x600);
-+ WREG32(data_reg, 0x82A08018);
-+ WREG32(index_reg, 0x601);
-+ WREG32(data_reg, 0x85808620);
-+ WREG32(index_reg, 0x602);
-+ WREG32(data_reg, 0xBFF081B8);
-+ WREG32(index_reg, 0x700);
-+ WREG32(data_reg, 0x82608010);
-+ WREG32(index_reg, 0x701);
-+ WREG32(data_reg, 0x85A08600);
-+ WREG32(index_reg, 0x702);
-+ WREG32(data_reg, 0x800081F0);
-+ WREG32(index_reg, 0x800);
-+ WREG32(data_reg, 0x8228BFF8);
-+ WREG32(index_reg, 0x801);
-+ WREG32(data_reg, 0x85E085E0);
-+ WREG32(index_reg, 0x802);
-+ WREG32(data_reg, 0xBFF88228);
-+ WREG32(index_reg, 0x10000);
-+ WREG32(data_reg, 0x82A8BF00);
-+ WREG32(index_reg, 0x10001);
-+ WREG32(data_reg, 0x82A08CC0);
-+ WREG32(index_reg, 0x10002);
-+ WREG32(data_reg, 0x8008BEF8);
-+ WREG32(index_reg, 0x10100);
-+ WREG32(data_reg, 0x81F0BF28);
-+ WREG32(index_reg, 0x10101);
-+ WREG32(data_reg, 0x83608CA0);
-+ WREG32(index_reg, 0x10102);
-+ WREG32(data_reg, 0x8018BED0);
-+ WREG32(index_reg, 0x10200);
-+ WREG32(data_reg, 0x8148BF38);
-+ WREG32(index_reg, 0x10201);
-+ WREG32(data_reg, 0x84408C80);
-+ WREG32(index_reg, 0x10202);
-+ WREG32(data_reg, 0x8008BEB8);
-+ WREG32(index_reg, 0x10300);
-+ WREG32(data_reg, 0x80B0BF78);
-+ WREG32(index_reg, 0x10301);
-+ WREG32(data_reg, 0x85008C20);
-+ WREG32(index_reg, 0x10302);
-+ WREG32(data_reg, 0x8020BEA0);
-+ WREG32(index_reg, 0x10400);
-+ WREG32(data_reg, 0x8028BF90);
-+ WREG32(index_reg, 0x10401);
-+ WREG32(data_reg, 0x85E08BC0);
-+ WREG32(index_reg, 0x10402);
-+ WREG32(data_reg, 0x8018BE90);
-+ WREG32(index_reg, 0x10500);
-+ WREG32(data_reg, 0xBFB8BFB0);
-+ WREG32(index_reg, 0x10501);
-+ WREG32(data_reg, 0x86C08B40);
-+ WREG32(index_reg, 0x10502);
-+ WREG32(data_reg, 0x8010BE90);
-+ WREG32(index_reg, 0x10600);
-+ WREG32(data_reg, 0xBF58BFC8);
-+ WREG32(index_reg, 0x10601);
-+ WREG32(data_reg, 0x87A08AA0);
-+ WREG32(index_reg, 0x10602);
-+ WREG32(data_reg, 0x8010BE98);
-+ WREG32(index_reg, 0x10700);
-+ WREG32(data_reg, 0xBF10BFF0);
-+ WREG32(index_reg, 0x10701);
-+ WREG32(data_reg, 0x886089E0);
-+ WREG32(index_reg, 0x10702);
-+ WREG32(data_reg, 0x8018BEB0);
-+ WREG32(index_reg, 0x10800);
-+ WREG32(data_reg, 0xBED8BFE8);
-+ WREG32(index_reg, 0x10801);
-+ WREG32(data_reg, 0x89408940);
-+ WREG32(index_reg, 0x10802);
-+ WREG32(data_reg, 0xBFE8BED8);
-+ WREG32(index_reg, 0x20000);
-+ WREG32(data_reg, 0x80008000);
-+ WREG32(index_reg, 0x20001);
-+ WREG32(data_reg, 0x90008000);
-+ WREG32(index_reg, 0x20002);
-+ WREG32(data_reg, 0x80008000);
-+ WREG32(index_reg, 0x20003);
-+ WREG32(data_reg, 0x80008000);
-+ WREG32(index_reg, 0x20100);
-+ WREG32(data_reg, 0x80108000);
-+ WREG32(index_reg, 0x20101);
-+ WREG32(data_reg, 0x8FE0BF70);
-+ WREG32(index_reg, 0x20102);
-+ WREG32(data_reg, 0xBFE880C0);
-+ WREG32(index_reg, 0x20103);
-+ WREG32(data_reg, 0x80008000);
-+ WREG32(index_reg, 0x20200);
-+ WREG32(data_reg, 0x8018BFF8);
-+ WREG32(index_reg, 0x20201);
-+ WREG32(data_reg, 0x8F80BF08);
-+ WREG32(index_reg, 0x20202);
-+ WREG32(data_reg, 0xBFD081A0);
-+ WREG32(index_reg, 0x20203);
-+ WREG32(data_reg, 0xBFF88000);
-+ WREG32(index_reg, 0x20300);
-+ WREG32(data_reg, 0x80188000);
-+ WREG32(index_reg, 0x20301);
-+ WREG32(data_reg, 0x8EE0BEC0);
-+ WREG32(index_reg, 0x20302);
-+ WREG32(data_reg, 0xBFB082A0);
-+ WREG32(index_reg, 0x20303);
-+ WREG32(data_reg, 0x80008000);
-+ WREG32(index_reg, 0x20400);
-+ WREG32(data_reg, 0x80188000);
-+ WREG32(index_reg, 0x20401);
-+ WREG32(data_reg, 0x8E00BEA0);
-+ WREG32(index_reg, 0x20402);
-+ WREG32(data_reg, 0xBF8883C0);
-+ WREG32(index_reg, 0x20403);
-+ WREG32(data_reg, 0x80008000);
-+ WREG32(index_reg, 0x20500);
-+ WREG32(data_reg, 0x80188000);
-+ WREG32(index_reg, 0x20501);
-+ WREG32(data_reg, 0x8D00BE90);
-+ WREG32(index_reg, 0x20502);
-+ WREG32(data_reg, 0xBF588500);
-+ WREG32(index_reg, 0x20503);
-+ WREG32(data_reg, 0x80008008);
-+ WREG32(index_reg, 0x20600);
-+ WREG32(data_reg, 0x80188000);
-+ WREG32(index_reg, 0x20601);
-+ WREG32(data_reg, 0x8BC0BE98);
-+ WREG32(index_reg, 0x20602);
-+ WREG32(data_reg, 0xBF308660);
-+ WREG32(index_reg, 0x20603);
-+ WREG32(data_reg, 0x80008008);
-+ WREG32(index_reg, 0x20700);
-+ WREG32(data_reg, 0x80108000);
-+ WREG32(index_reg, 0x20701);
-+ WREG32(data_reg, 0x8A80BEB0);
-+ WREG32(index_reg, 0x20702);
-+ WREG32(data_reg, 0xBF0087C0);
-+ WREG32(index_reg, 0x20703);
-+ WREG32(data_reg, 0x80008008);
-+ WREG32(index_reg, 0x20800);
-+ WREG32(data_reg, 0x80108000);
-+ WREG32(index_reg, 0x20801);
-+ WREG32(data_reg, 0x8920BED0);
-+ WREG32(index_reg, 0x20802);
-+ WREG32(data_reg, 0xBED08920);
-+ WREG32(index_reg, 0x20803);
-+ WREG32(data_reg, 0x80008010);
-+ WREG32(index_reg, 0x30000);
-+ WREG32(data_reg, 0x90008000);
-+ WREG32(index_reg, 0x30001);
-+ WREG32(data_reg, 0x80008000);
-+ WREG32(index_reg, 0x30100);
-+ WREG32(data_reg, 0x8FE0BF90);
-+ WREG32(index_reg, 0x30101);
-+ WREG32(data_reg, 0xBFF880A0);
-+ WREG32(index_reg, 0x30200);
-+ WREG32(data_reg, 0x8F60BF40);
-+ WREG32(index_reg, 0x30201);
-+ WREG32(data_reg, 0xBFE88180);
-+ WREG32(index_reg, 0x30300);
-+ WREG32(data_reg, 0x8EC0BF00);
-+ WREG32(index_reg, 0x30301);
-+ WREG32(data_reg, 0xBFC88280);
-+ WREG32(index_reg, 0x30400);
-+ WREG32(data_reg, 0x8DE0BEE0);
-+ WREG32(index_reg, 0x30401);
-+ WREG32(data_reg, 0xBFA083A0);
-+ WREG32(index_reg, 0x30500);
-+ WREG32(data_reg, 0x8CE0BED0);
-+ WREG32(index_reg, 0x30501);
-+ WREG32(data_reg, 0xBF7884E0);
-+ WREG32(index_reg, 0x30600);
-+ WREG32(data_reg, 0x8BA0BED8);
-+ WREG32(index_reg, 0x30601);
-+ WREG32(data_reg, 0xBF508640);
-+ WREG32(index_reg, 0x30700);
-+ WREG32(data_reg, 0x8A60BEE8);
-+ WREG32(index_reg, 0x30701);
-+ WREG32(data_reg, 0xBF2087A0);
-+ WREG32(index_reg, 0x30800);
-+ WREG32(data_reg, 0x8900BF00);
-+ WREG32(index_reg, 0x30801);
-+ WREG32(data_reg, 0xBF008900);
- }
-
- struct rv515_watermark {
-diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
-new file mode 100644
-index 0000000..a65e17e
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/rv515d.h
-@@ -0,0 +1,220 @@
-+/*
-+ * Copyright 2008 Advanced Micro Devices, Inc.
-+ * Copyright 2008 Red Hat Inc.
-+ * Copyright 2009 Jerome Glisse.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#ifndef __RV515D_H__
-+#define __RV515D_H__
-+
-+/*
-+ * RV515 registers
-+ */
-+#define PCIE_INDEX 0x0030
-+#define PCIE_DATA 0x0034
-+#define MC_IND_INDEX 0x0070
-+#define MC_IND_WR_EN (1 << 24)
-+#define MC_IND_DATA 0x0074
-+#define RBBM_SOFT_RESET 0x00F0
-+#define CONFIG_MEMSIZE 0x00F8
-+#define HDP_FB_LOCATION 0x0134
-+#define CP_CSQ_CNTL 0x0740
-+#define CP_CSQ_MODE 0x0744
-+#define CP_CSQ_ADDR 0x07F0
-+#define CP_CSQ_DATA 0x07F4
-+#define CP_CSQ_STAT 0x07F8
-+#define CP_CSQ2_STAT 0x07FC
-+#define RBBM_STATUS 0x0E40
-+#define DST_PIPE_CONFIG 0x170C
-+#define WAIT_UNTIL 0x1720
-+#define WAIT_2D_IDLE (1 << 14)
-+#define WAIT_3D_IDLE (1 << 15)
-+#define WAIT_2D_IDLECLEAN (1 << 16)
-+#define WAIT_3D_IDLECLEAN (1 << 17)
-+#define ISYNC_CNTL 0x1724
-+#define ISYNC_ANY2D_IDLE3D (1 << 0)
-+#define ISYNC_ANY3D_IDLE2D (1 << 1)
-+#define ISYNC_TRIG2D_IDLE3D (1 << 2)
-+#define ISYNC_TRIG3D_IDLE2D (1 << 3)
-+#define ISYNC_WAIT_IDLEGUI (1 << 4)
-+#define ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
-+#define VAP_INDEX_OFFSET 0x208C
-+#define VAP_PVS_STATE_FLUSH_REG 0x2284
-+#define GB_ENABLE 0x4008
-+#define GB_MSPOS0 0x4010
-+#define MS_X0_SHIFT 0
-+#define MS_Y0_SHIFT 4
-+#define MS_X1_SHIFT 8
-+#define MS_Y1_SHIFT 12
-+#define MS_X2_SHIFT 16
-+#define MS_Y2_SHIFT 20
-+#define MSBD0_Y_SHIFT 24
-+#define MSBD0_X_SHIFT 28
-+#define GB_MSPOS1 0x4014
-+#define MS_X3_SHIFT 0
-+#define MS_Y3_SHIFT 4
-+#define MS_X4_SHIFT 8
-+#define MS_Y4_SHIFT 12
-+#define MS_X5_SHIFT 16
-+#define MS_Y5_SHIFT 20
-+#define MSBD1_SHIFT 24
-+#define GB_TILE_CONFIG 0x4018
-+#define ENABLE_TILING (1 << 0)
-+#define PIPE_COUNT_MASK 0x0000000E
-+#define PIPE_COUNT_SHIFT 1
-+#define TILE_SIZE_8 (0 << 4)
-+#define TILE_SIZE_16 (1 << 4)
-+#define TILE_SIZE_32 (2 << 4)
-+#define SUBPIXEL_1_12 (0 << 16)
-+#define SUBPIXEL_1_16 (1 << 16)
-+#define GB_SELECT 0x401C
-+#define GB_AA_CONFIG 0x4020
-+#define GB_PIPE_SELECT 0x402C
-+#define GA_ENHANCE 0x4274
-+#define GA_DEADLOCK_CNTL (1 << 0)
-+#define GA_FASTSYNC_CNTL (1 << 1)
-+#define GA_POLY_MODE 0x4288
-+#define FRONT_PTYPE_POINT (0 << 4)
-+#define FRONT_PTYPE_LINE (1 << 4)
-+#define FRONT_PTYPE_TRIANGE (2 << 4)
-+#define BACK_PTYPE_POINT (0 << 7)
-+#define BACK_PTYPE_LINE (1 << 7)
-+#define BACK_PTYPE_TRIANGE (2 << 7)
-+#define GA_ROUND_MODE 0x428C
-+#define GEOMETRY_ROUND_TRUNC (0 << 0)
-+#define GEOMETRY_ROUND_NEAREST (1 << 0)
-+#define COLOR_ROUND_TRUNC (0 << 2)
-+#define COLOR_ROUND_NEAREST (1 << 2)
-+#define SU_REG_DEST 0x42C8
-+#define RB3D_DSTCACHE_CTLSTAT 0x4E4C
-+#define RB3D_DC_FLUSH (2 << 0)
-+#define RB3D_DC_FREE (2 << 2)
-+#define RB3D_DC_FINISH (1 << 4)
-+#define ZB_ZCACHE_CTLSTAT 0x4F18
-+#define ZC_FLUSH (1 << 0)
-+#define ZC_FREE (1 << 1)
-+#define DC_LB_MEMORY_SPLIT 0x6520
-+#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
-+#define DC_LB_MEMORY_SPLIT_SHIFT 0
-+#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
-+#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
-+#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
-+#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
-+#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
-+#define DC_LB_DISP1_END_ADR_SHIFT 4
-+#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
-+#define D1MODE_PRIORITY_A_CNT 0x6548
-+#define MODE_PRIORITY_MARK_MASK 0x00007FFF
-+#define MODE_PRIORITY_OFF (1 << 16)
-+#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
-+#define MODE_PRIORITY_FORCE_MASK (1 << 24)
-+#define D1MODE_PRIORITY_B_CNT 0x654C
-+#define LB_MAX_REQ_OUTSTANDING 0x6D58
-+#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
-+#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
-+#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
-+#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
-+#define D2MODE_PRIORITY_A_CNT 0x6D48
-+#define D2MODE_PRIORITY_B_CNT 0x6D4C
-+
-+/* ix[MC] registers */
-+#define MC_FB_LOCATION 0x01
-+#define MC_FB_START_MASK 0x0000FFFF
-+#define MC_FB_START_SHIFT 0
-+#define MC_FB_TOP_MASK 0xFFFF0000
-+#define MC_FB_TOP_SHIFT 16
-+#define MC_AGP_LOCATION 0x02
-+#define MC_AGP_START_MASK 0x0000FFFF
-+#define MC_AGP_START_SHIFT 0
-+#define MC_AGP_TOP_MASK 0xFFFF0000
-+#define MC_AGP_TOP_SHIFT 16
-+#define MC_AGP_BASE 0x03
-+#define MC_AGP_BASE_2 0x04
-+#define MC_CNTL 0x5
-+#define MEM_NUM_CHANNELS_MASK 0x00000003
-+#define MC_STATUS 0x08
-+#define MC_STATUS_IDLE (1 << 4)
-+#define MC_MISC_LAT_TIMER 0x09
-+#define MC_CPR_INIT_LAT_MASK 0x0000000F
-+#define MC_VF_INIT_LAT_MASK 0x000000F0
-+#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
-+#define MC_DISP0R_INIT_LAT_SHIFT 8
-+#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
-+#define MC_DISP1R_INIT_LAT_SHIFT 12
-+#define MC_FIXED_INIT_LAT_MASK 0x000F0000
-+#define MC_E2R_INIT_LAT_MASK 0x00F00000
-+#define SAME_PAGE_PRIO_MASK 0x0F000000
-+#define MC_GLOBW_INIT_LAT_MASK 0xF0000000
-+
-+
-+/*
-+ * PM4 packet
-+ */
-+#define CP_PACKET0 0x00000000
-+#define PACKET0_BASE_INDEX_SHIFT 0
-+#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
-+#define PACKET0_COUNT_SHIFT 16
-+#define PACKET0_COUNT_MASK (0x3fff << 16)
-+#define CP_PACKET1 0x40000000
-+#define CP_PACKET2 0x80000000
-+#define PACKET2_PAD_SHIFT 0
-+#define PACKET2_PAD_MASK (0x3fffffff << 0)
-+#define CP_PACKET3 0xC0000000
-+#define PACKET3_IT_OPCODE_SHIFT 8
-+#define PACKET3_IT_OPCODE_MASK (0xff << 8)
-+#define PACKET3_COUNT_SHIFT 16
-+#define PACKET3_COUNT_MASK (0x3fff << 16)
-+/* PACKET3 op code */
-+#define PACKET3_NOP 0x10
-+#define PACKET3_3D_DRAW_VBUF 0x28
-+#define PACKET3_3D_DRAW_IMMD 0x29
-+#define PACKET3_3D_DRAW_INDX 0x2A
-+#define PACKET3_3D_LOAD_VBPNTR 0x2F
-+#define PACKET3_INDX_BUFFER 0x33
-+#define PACKET3_3D_DRAW_VBUF_2 0x34
-+#define PACKET3_3D_DRAW_IMMD_2 0x35
-+#define PACKET3_3D_DRAW_INDX_2 0x36
-+#define PACKET3_BITBLT_MULTI 0x9B
-+
-+#define PACKET0(reg, n) (CP_PACKET0 | \
-+ REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
-+ REG_SET(PACKET0_COUNT, (n)))
-+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-+#define PACKET3(op, n) (CP_PACKET3 | \
-+ REG_SET(PACKET3_IT_OPCODE, (op)) | \
-+ REG_SET(PACKET3_COUNT, (n)))
-+
-+#define PACKET_TYPE0 0
-+#define PACKET_TYPE1 1
-+#define PACKET_TYPE2 2
-+#define PACKET_TYPE3 3
-+
-+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-+
-+#endif
-+
-diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h
-deleted file mode 100644
-index f3cf840..0000000
---- a/drivers/gpu/drm/radeon/rv515r.h
-+++ /dev/null
-@@ -1,170 +0,0 @@
--/*
-- * Copyright 2008 Advanced Micro Devices, Inc.
-- * Copyright 2008 Red Hat Inc.
-- * Copyright 2009 Jerome Glisse.
-- *
-- * Permission is hereby granted, free of charge, to any person obtaining a
-- * copy of this software and associated documentation files (the "Software"),
-- * to deal in the Software without restriction, including without limitation
-- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- * and/or sell copies of the Software, and to permit persons to whom the
-- * Software is furnished to do so, subject to the following conditions:
-- *
-- * The above copyright notice and this permission notice shall be included in
-- * all copies or substantial portions of the Software.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- * OTHER DEALINGS IN THE SOFTWARE.
-- *
-- * Authors: Dave Airlie
-- * Alex Deucher
-- * Jerome Glisse
-- */
--#ifndef RV515R_H
--#define RV515R_H
--
--/* RV515 registers */
--#define PCIE_INDEX 0x0030
--#define PCIE_DATA 0x0034
--#define MC_IND_INDEX 0x0070
--#define MC_IND_WR_EN (1 << 24)
--#define MC_IND_DATA 0x0074
--#define RBBM_SOFT_RESET 0x00F0
--#define CONFIG_MEMSIZE 0x00F8
--#define HDP_FB_LOCATION 0x0134
--#define CP_CSQ_CNTL 0x0740
--#define CP_CSQ_MODE 0x0744
--#define CP_CSQ_ADDR 0x07F0
--#define CP_CSQ_DATA 0x07F4
--#define CP_CSQ_STAT 0x07F8
--#define CP_CSQ2_STAT 0x07FC
--#define RBBM_STATUS 0x0E40
--#define DST_PIPE_CONFIG 0x170C
--#define WAIT_UNTIL 0x1720
--#define WAIT_2D_IDLE (1 << 14)
--#define WAIT_3D_IDLE (1 << 15)
--#define WAIT_2D_IDLECLEAN (1 << 16)
--#define WAIT_3D_IDLECLEAN (1 << 17)
--#define ISYNC_CNTL 0x1724
--#define ISYNC_ANY2D_IDLE3D (1 << 0)
--#define ISYNC_ANY3D_IDLE2D (1 << 1)
--#define ISYNC_TRIG2D_IDLE3D (1 << 2)
--#define ISYNC_TRIG3D_IDLE2D (1 << 3)
--#define ISYNC_WAIT_IDLEGUI (1 << 4)
--#define ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
--#define VAP_INDEX_OFFSET 0x208C
--#define VAP_PVS_STATE_FLUSH_REG 0x2284
--#define GB_ENABLE 0x4008
--#define GB_MSPOS0 0x4010
--#define MS_X0_SHIFT 0
--#define MS_Y0_SHIFT 4
--#define MS_X1_SHIFT 8
--#define MS_Y1_SHIFT 12
--#define MS_X2_SHIFT 16
--#define MS_Y2_SHIFT 20
--#define MSBD0_Y_SHIFT 24
--#define MSBD0_X_SHIFT 28
--#define GB_MSPOS1 0x4014
--#define MS_X3_SHIFT 0
--#define MS_Y3_SHIFT 4
--#define MS_X4_SHIFT 8
--#define MS_Y4_SHIFT 12
--#define MS_X5_SHIFT 16
--#define MS_Y5_SHIFT 20
--#define MSBD1_SHIFT 24
--#define GB_TILE_CONFIG 0x4018
--#define ENABLE_TILING (1 << 0)
--#define PIPE_COUNT_MASK 0x0000000E
--#define PIPE_COUNT_SHIFT 1
--#define TILE_SIZE_8 (0 << 4)
--#define TILE_SIZE_16 (1 << 4)
--#define TILE_SIZE_32 (2 << 4)
--#define SUBPIXEL_1_12 (0 << 16)
--#define SUBPIXEL_1_16 (1 << 16)
--#define GB_SELECT 0x401C
--#define GB_AA_CONFIG 0x4020
--#define GB_PIPE_SELECT 0x402C
--#define GA_ENHANCE 0x4274
--#define GA_DEADLOCK_CNTL (1 << 0)
--#define GA_FASTSYNC_CNTL (1 << 1)
--#define GA_POLY_MODE 0x4288
--#define FRONT_PTYPE_POINT (0 << 4)
--#define FRONT_PTYPE_LINE (1 << 4)
--#define FRONT_PTYPE_TRIANGE (2 << 4)
--#define BACK_PTYPE_POINT (0 << 7)
--#define BACK_PTYPE_LINE (1 << 7)
--#define BACK_PTYPE_TRIANGE (2 << 7)
--#define GA_ROUND_MODE 0x428C
--#define GEOMETRY_ROUND_TRUNC (0 << 0)
--#define GEOMETRY_ROUND_NEAREST (1 << 0)
--#define COLOR_ROUND_TRUNC (0 << 2)
--#define COLOR_ROUND_NEAREST (1 << 2)
--#define SU_REG_DEST 0x42C8
--#define RB3D_DSTCACHE_CTLSTAT 0x4E4C
--#define RB3D_DC_FLUSH (2 << 0)
--#define RB3D_DC_FREE (2 << 2)
--#define RB3D_DC_FINISH (1 << 4)
--#define ZB_ZCACHE_CTLSTAT 0x4F18
--#define ZC_FLUSH (1 << 0)
--#define ZC_FREE (1 << 1)
--#define DC_LB_MEMORY_SPLIT 0x6520
--#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
--#define DC_LB_MEMORY_SPLIT_SHIFT 0
--#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
--#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
--#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
--#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
--#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
--#define DC_LB_DISP1_END_ADR_SHIFT 4
--#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
--#define D1MODE_PRIORITY_A_CNT 0x6548
--#define MODE_PRIORITY_MARK_MASK 0x00007FFF
--#define MODE_PRIORITY_OFF (1 << 16)
--#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
--#define MODE_PRIORITY_FORCE_MASK (1 << 24)
--#define D1MODE_PRIORITY_B_CNT 0x654C
--#define LB_MAX_REQ_OUTSTANDING 0x6D58
--#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
--#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
--#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
--#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
--#define D2MODE_PRIORITY_A_CNT 0x6D48
--#define D2MODE_PRIORITY_B_CNT 0x6D4C
--
--/* ix[MC] registers */
--#define MC_FB_LOCATION 0x01
--#define MC_FB_START_MASK 0x0000FFFF
--#define MC_FB_START_SHIFT 0
--#define MC_FB_TOP_MASK 0xFFFF0000
--#define MC_FB_TOP_SHIFT 16
--#define MC_AGP_LOCATION 0x02
--#define MC_AGP_START_MASK 0x0000FFFF
--#define MC_AGP_START_SHIFT 0
--#define MC_AGP_TOP_MASK 0xFFFF0000
--#define MC_AGP_TOP_SHIFT 16
--#define MC_AGP_BASE 0x03
--#define MC_AGP_BASE_2 0x04
--#define MC_CNTL 0x5
--#define MEM_NUM_CHANNELS_MASK 0x00000003
--#define MC_STATUS 0x08
--#define MC_STATUS_IDLE (1 << 4)
--#define MC_MISC_LAT_TIMER 0x09
--#define MC_CPR_INIT_LAT_MASK 0x0000000F
--#define MC_VF_INIT_LAT_MASK 0x000000F0
--#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
--#define MC_DISP0R_INIT_LAT_SHIFT 8
--#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
--#define MC_DISP1R_INIT_LAT_SHIFT 12
--#define MC_FIXED_INIT_LAT_MASK 0x000F0000
--#define MC_E2R_INIT_LAT_MASK 0x00F00000
--#define SAME_PAGE_PRIO_MASK 0x0F000000
--#define MC_GLOBW_INIT_LAT_MASK 0xF0000000
--
--
--#endif
--
-diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
-index 21d8ffd..57765f6 100644
---- a/drivers/gpu/drm/radeon/rv770.c
-+++ b/drivers/gpu/drm/radeon/rv770.c
-@@ -25,100 +25,975 @@
- * Alex Deucher
- * Jerome Glisse
- */
-+#include <linux/firmware.h>
-+#include <linux/platform_device.h>
- #include "drmP.h"
--#include "radeon_reg.h"
- #include "radeon.h"
-+#include "radeon_share.h"
-+#include "rv770d.h"
-+#include "avivod.h"
-+#include "atom.h"
-
--/* rv770,rv730,rv710 depends on : */
--void rs600_mc_disable_clients(struct radeon_device *rdev);
-+#define R700_PFP_UCODE_SIZE 848
-+#define R700_PM4_UCODE_SIZE 1360
-
--/* This files gather functions specifics to:
-- * rv770,rv730,rv710
-- *
-- * Some of these functions might be used by newer ASICs.
-- */
--int rv770_mc_wait_for_idle(struct radeon_device *rdev);
--void rv770_gpu_init(struct radeon_device *rdev);
-+static void rv770_gpu_init(struct radeon_device *rdev);
-+void rv770_fini(struct radeon_device *rdev);
-
-
- /*
-- * MC
-+ * GART
- */
--int rv770_mc_init(struct radeon_device *rdev)
-+int rv770_pcie_gart_enable(struct radeon_device *rdev)
- {
-- uint32_t tmp;
-+ u32 tmp;
-+ int r, i;
-
-- rv770_gpu_init(rdev);
-+ /* Initialize common gart structure */
-+ r = radeon_gart_init(rdev);
-+ if (r) {
-+ return r;
-+ }
-+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
-+ r = radeon_gart_table_vram_alloc(rdev);
-+ if (r) {
-+ return r;
-+ }
-+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
-+ r600_gart_clear_page(rdev, i);
-+ /* Setup L2 cache */
-+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
-+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
-+ EFFECTIVE_L2_QUEUE_SIZE(7));
-+ WREG32(VM_L2_CNTL2, 0);
-+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
-+ /* Setup TLB control */
-+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
-+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
-+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
-+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
-+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
-+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
-+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
-+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
-+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
-+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
-+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
-+ (u32)(rdev->dummy_page.addr >> 12));
-+ for (i = 1; i < 7; i++)
-+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
-
-- /* setup the gart before changing location so we can ask to
-- * discard unmapped mc request
-- */
-- /* FIXME: disable out of gart access */
-- tmp = rdev->mc.gtt_location / 4096;
-- tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
-- WREG32(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
-- tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
-- tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
-- WREG32(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
--
-- rs600_mc_disable_clients(rdev);
-- if (rv770_mc_wait_for_idle(rdev)) {
-- printk(KERN_WARNING "Failed to wait MC idle while "
-- "programming pipes. Bad things might happen.\n");
-- }
--
-- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-- tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
-- tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
-- WREG32(R700_MC_VM_FB_LOCATION, tmp);
-- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-- tmp = REG_SET(R700_MC_AGP_TOP, tmp >> 22);
-- WREG32(R700_MC_VM_AGP_TOP, tmp);
-- tmp = REG_SET(R700_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
-- WREG32(R700_MC_VM_AGP_BOT, tmp);
-+ r600_pcie_gart_tlb_flush(rdev);
-+ rdev->gart.ready = true;
- return 0;
- }
-
--void rv770_mc_fini(struct radeon_device *rdev)
-+void rv770_pcie_gart_disable(struct radeon_device *rdev)
- {
-- /* FIXME: implement */
-+ u32 tmp;
-+ int i;
-+
-+ /* Clear ptes*/
-+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
-+ r600_gart_clear_page(rdev, i);
-+ r600_pcie_gart_tlb_flush(rdev);
-+ /* Disable all tables */
-+ for (i = 0; i < 7; i++)
-+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
-+
-+ /* Setup L2 cache */
-+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
-+ EFFECTIVE_L2_QUEUE_SIZE(7));
-+ WREG32(VM_L2_CNTL2, 0);
-+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
-+ /* Setup TLB control */
-+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
-+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
-+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
-+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
-+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- }
-
-
- /*
-- * Global GPU functions
-+ * MC
- */
--void rv770_errata(struct radeon_device *rdev)
-+static void rv770_mc_resume(struct radeon_device *rdev)
- {
-- rdev->pll_errata = 0;
-+ u32 d1vga_control, d2vga_control;
-+ u32 vga_render_control, vga_hdp_control;
-+ u32 d1crtc_control, d2crtc_control;
-+ u32 new_d1grph_primary, new_d1grph_secondary;
-+ u32 new_d2grph_primary, new_d2grph_secondary;
-+ u64 old_vram_start;
-+ u32 tmp;
-+ int i, j;
-+
-+ /* Initialize HDP */
-+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
-+ WREG32((0x2c14 + j), 0x00000000);
-+ WREG32((0x2c18 + j), 0x00000000);
-+ WREG32((0x2c1c + j), 0x00000000);
-+ WREG32((0x2c20 + j), 0x00000000);
-+ WREG32((0x2c24 + j), 0x00000000);
-+ }
-+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
-+
-+ d1vga_control = RREG32(D1VGA_CONTROL);
-+ d2vga_control = RREG32(D2VGA_CONTROL);
-+ vga_render_control = RREG32(VGA_RENDER_CONTROL);
-+ vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-+ d1crtc_control = RREG32(D1CRTC_CONTROL);
-+ d2crtc_control = RREG32(D2CRTC_CONTROL);
-+ old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
-+ new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
-+ new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
-+ new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
-+ new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
-+ new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
-+ new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
-+ new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
-+ new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-+
-+ /* Stop all video */
-+ WREG32(D1VGA_CONTROL, 0);
-+ WREG32(D2VGA_CONTROL, 0);
-+ WREG32(VGA_RENDER_CONTROL, 0);
-+ WREG32(D1CRTC_UPDATE_LOCK, 1);
-+ WREG32(D2CRTC_UPDATE_LOCK, 1);
-+ WREG32(D1CRTC_CONTROL, 0);
-+ WREG32(D2CRTC_CONTROL, 0);
-+ WREG32(D1CRTC_UPDATE_LOCK, 0);
-+ WREG32(D2CRTC_UPDATE_LOCK, 0);
-+
-+ mdelay(1);
-+ if (r600_mc_wait_for_idle(rdev)) {
-+ printk(KERN_WARNING "[drm] MC not idle !\n");
-+ }
-+
-+ /* Lockout access through VGA aperture*/
-+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-+
-+ /* Update configuration */
-+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
-+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
-+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
-+ tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
-+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
-+ WREG32(MC_VM_FB_LOCATION, tmp);
-+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
-+ WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-+ WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
-+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
-+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
-+ } else {
-+ WREG32(MC_VM_AGP_BASE, 0);
-+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
-+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
-+ }
-+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
-+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
-+ WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
-+ WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
-+ WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-+
-+ /* Unlock host access */
-+ WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-+
-+ mdelay(1);
-+ if (r600_mc_wait_for_idle(rdev)) {
-+ printk(KERN_WARNING "[drm] MC not idle !\n");
-+ }
-+
-+ /* Restore video state */
-+ WREG32(D1CRTC_UPDATE_LOCK, 1);
-+ WREG32(D2CRTC_UPDATE_LOCK, 1);
-+ WREG32(D1CRTC_CONTROL, d1crtc_control);
-+ WREG32(D2CRTC_CONTROL, d2crtc_control);
-+ WREG32(D1CRTC_UPDATE_LOCK, 0);
-+ WREG32(D2CRTC_UPDATE_LOCK, 0);
-+ WREG32(D1VGA_CONTROL, d1vga_control);
-+ WREG32(D2VGA_CONTROL, d2vga_control);
-+ WREG32(VGA_RENDER_CONTROL, vga_render_control);
- }
-
--int rv770_mc_wait_for_idle(struct radeon_device *rdev)
-+
-+/*
-+ * CP.
-+ */
-+void r700_cp_stop(struct radeon_device *rdev)
- {
-- /* FIXME: implement */
-- return 0;
-+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
- }
-
--void rv770_gpu_init(struct radeon_device *rdev)
-+
-+static int rv770_cp_load_microcode(struct radeon_device *rdev)
- {
-- /* FIXME: implement */
-+ const __be32 *fw_data;
-+ int i;
-+
-+ if (!rdev->me_fw || !rdev->pfp_fw)
-+ return -EINVAL;
-+
-+ r700_cp_stop(rdev);
-+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
-+
-+ /* Reset cp */
-+ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
-+ RREG32(GRBM_SOFT_RESET);
-+ mdelay(15);
-+ WREG32(GRBM_SOFT_RESET, 0);
-+
-+ fw_data = (const __be32 *)rdev->pfp_fw->data;
-+ WREG32(CP_PFP_UCODE_ADDR, 0);
-+ for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
-+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
-+ WREG32(CP_PFP_UCODE_ADDR, 0);
-+
-+ fw_data = (const __be32 *)rdev->me_fw->data;
-+ WREG32(CP_ME_RAM_WADDR, 0);
-+ for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
-+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
-+
-+ WREG32(CP_PFP_UCODE_ADDR, 0);
-+ WREG32(CP_ME_RAM_WADDR, 0);
-+ WREG32(CP_ME_RAM_RADDR, 0);
-+ return 0;
- }
-
-
- /*
-- * VRAM info
-+ * Core functions
- */
--void rv770_vram_get_type(struct radeon_device *rdev)
-+static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
-+ u32 num_backends,
-+ u32 backend_disable_mask)
- {
-- /* FIXME: implement */
-+ u32 backend_map = 0;
-+ u32 enabled_backends_mask;
-+ u32 enabled_backends_count;
-+ u32 cur_pipe;
-+ u32 swizzle_pipe[R7XX_MAX_PIPES];
-+ u32 cur_backend;
-+ u32 i;
-+
-+ if (num_tile_pipes > R7XX_MAX_PIPES)
-+ num_tile_pipes = R7XX_MAX_PIPES;
-+ if (num_tile_pipes < 1)
-+ num_tile_pipes = 1;
-+ if (num_backends > R7XX_MAX_BACKENDS)
-+ num_backends = R7XX_MAX_BACKENDS;
-+ if (num_backends < 1)
-+ num_backends = 1;
-+
-+ enabled_backends_mask = 0;
-+ enabled_backends_count = 0;
-+ for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
-+ if (((backend_disable_mask >> i) & 1) == 0) {
-+ enabled_backends_mask |= (1 << i);
-+ ++enabled_backends_count;
-+ }
-+ if (enabled_backends_count == num_backends)
-+ break;
-+ }
-+
-+ if (enabled_backends_count == 0) {
-+ enabled_backends_mask = 1;
-+ enabled_backends_count = 1;
-+ }
-+
-+ if (enabled_backends_count != num_backends)
-+ num_backends = enabled_backends_count;
-+
-+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
-+ switch (num_tile_pipes) {
-+ case 1:
-+ swizzle_pipe[0] = 0;
-+ break;
-+ case 2:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 1;
-+ break;
-+ case 3:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 1;
-+ break;
-+ case 4:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 3;
-+ swizzle_pipe[3] = 1;
-+ break;
-+ case 5:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 4;
-+ swizzle_pipe[3] = 1;
-+ swizzle_pipe[4] = 3;
-+ break;
-+ case 6:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 4;
-+ swizzle_pipe[3] = 5;
-+ swizzle_pipe[4] = 3;
-+ swizzle_pipe[5] = 1;
-+ break;
-+ case 7:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 4;
-+ swizzle_pipe[3] = 6;
-+ swizzle_pipe[4] = 3;
-+ swizzle_pipe[5] = 1;
-+ swizzle_pipe[6] = 5;
-+ break;
-+ case 8:
-+ swizzle_pipe[0] = 0;
-+ swizzle_pipe[1] = 2;
-+ swizzle_pipe[2] = 4;
-+ swizzle_pipe[3] = 6;
-+ swizzle_pipe[4] = 3;
-+ swizzle_pipe[5] = 1;
-+ swizzle_pipe[6] = 7;
-+ swizzle_pipe[7] = 5;
-+ break;
-+ }
-+
-+ cur_backend = 0;
-+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
-+ cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-+
-+ backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-+
-+ cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-+ }
-+
-+ return backend_map;
- }
-
--void rv770_vram_info(struct radeon_device *rdev)
-+static void rv770_gpu_init(struct radeon_device *rdev)
- {
-- rv770_vram_get_type(rdev);
-+ int i, j, num_qd_pipes;
-+ u32 sx_debug_1;
-+ u32 smx_dc_ctl0;
-+ u32 num_gs_verts_per_thread;
-+ u32 vgt_gs_per_es;
-+ u32 gs_prim_buffer_depth = 0;
-+ u32 sq_ms_fifo_sizes;
-+ u32 sq_config;
-+ u32 sq_thread_resource_mgmt;
-+ u32 hdp_host_path_cntl;
-+ u32 sq_dyn_gpr_size_simd_ab_0;
-+ u32 backend_map;
-+ u32 gb_tiling_config = 0;
-+ u32 cc_rb_backend_disable = 0;
-+ u32 cc_gc_shader_pipe_config = 0;
-+ u32 mc_arb_ramcfg;
-+ u32 db_debug4;
-
-- /* FIXME: implement */
-+ /* setup chip specs */
-+ switch (rdev->family) {
-+ case CHIP_RV770:
-+ rdev->config.rv770.max_pipes = 4;
-+ rdev->config.rv770.max_tile_pipes = 8;
-+ rdev->config.rv770.max_simds = 10;
-+ rdev->config.rv770.max_backends = 4;
-+ rdev->config.rv770.max_gprs = 256;
-+ rdev->config.rv770.max_threads = 248;
-+ rdev->config.rv770.max_stack_entries = 512;
-+ rdev->config.rv770.max_hw_contexts = 8;
-+ rdev->config.rv770.max_gs_threads = 16 * 2;
-+ rdev->config.rv770.sx_max_export_size = 128;
-+ rdev->config.rv770.sx_max_export_pos_size = 16;
-+ rdev->config.rv770.sx_max_export_smx_size = 112;
-+ rdev->config.rv770.sq_num_cf_insts = 2;
-+
-+ rdev->config.rv770.sx_num_of_sets = 7;
-+ rdev->config.rv770.sc_prim_fifo_size = 0xF9;
-+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
-+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
-+ break;
-+ case CHIP_RV730:
-+ rdev->config.rv770.max_pipes = 2;
-+ rdev->config.rv770.max_tile_pipes = 4;
-+ rdev->config.rv770.max_simds = 8;
-+ rdev->config.rv770.max_backends = 2;
-+ rdev->config.rv770.max_gprs = 128;
-+ rdev->config.rv770.max_threads = 248;
-+ rdev->config.rv770.max_stack_entries = 256;
-+ rdev->config.rv770.max_hw_contexts = 8;
-+ rdev->config.rv770.max_gs_threads = 16 * 2;
-+ rdev->config.rv770.sx_max_export_size = 256;
-+ rdev->config.rv770.sx_max_export_pos_size = 32;
-+ rdev->config.rv770.sx_max_export_smx_size = 224;
-+ rdev->config.rv770.sq_num_cf_insts = 2;
-+
-+ rdev->config.rv770.sx_num_of_sets = 7;
-+ rdev->config.rv770.sc_prim_fifo_size = 0xf9;
-+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
-+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
-+ if (rdev->config.rv770.sx_max_export_pos_size > 16) {
-+ rdev->config.rv770.sx_max_export_pos_size -= 16;
-+ rdev->config.rv770.sx_max_export_smx_size += 16;
-+ }
-+ break;
-+ case CHIP_RV710:
-+ rdev->config.rv770.max_pipes = 2;
-+ rdev->config.rv770.max_tile_pipes = 2;
-+ rdev->config.rv770.max_simds = 2;
-+ rdev->config.rv770.max_backends = 1;
-+ rdev->config.rv770.max_gprs = 256;
-+ rdev->config.rv770.max_threads = 192;
-+ rdev->config.rv770.max_stack_entries = 256;
-+ rdev->config.rv770.max_hw_contexts = 4;
-+ rdev->config.rv770.max_gs_threads = 8 * 2;
-+ rdev->config.rv770.sx_max_export_size = 128;
-+ rdev->config.rv770.sx_max_export_pos_size = 16;
-+ rdev->config.rv770.sx_max_export_smx_size = 112;
-+ rdev->config.rv770.sq_num_cf_insts = 1;
-+
-+ rdev->config.rv770.sx_num_of_sets = 7;
-+ rdev->config.rv770.sc_prim_fifo_size = 0x40;
-+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
-+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
-+ break;
-+ case CHIP_RV740:
-+ rdev->config.rv770.max_pipes = 4;
-+ rdev->config.rv770.max_tile_pipes = 4;
-+ rdev->config.rv770.max_simds = 8;
-+ rdev->config.rv770.max_backends = 4;
-+ rdev->config.rv770.max_gprs = 256;
-+ rdev->config.rv770.max_threads = 248;
-+ rdev->config.rv770.max_stack_entries = 512;
-+ rdev->config.rv770.max_hw_contexts = 8;
-+ rdev->config.rv770.max_gs_threads = 16 * 2;
-+ rdev->config.rv770.sx_max_export_size = 256;
-+ rdev->config.rv770.sx_max_export_pos_size = 32;
-+ rdev->config.rv770.sx_max_export_smx_size = 224;
-+ rdev->config.rv770.sq_num_cf_insts = 2;
-+
-+ rdev->config.rv770.sx_num_of_sets = 7;
-+ rdev->config.rv770.sc_prim_fifo_size = 0x100;
-+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
-+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
-+
-+ if (rdev->config.rv770.sx_max_export_pos_size > 16) {
-+ rdev->config.rv770.sx_max_export_pos_size -= 16;
-+ rdev->config.rv770.sx_max_export_smx_size += 16;
-+ }
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ /* Initialize HDP */
-+ j = 0;
-+ for (i = 0; i < 32; i++) {
-+ WREG32((0x2c14 + j), 0x00000000);
-+ WREG32((0x2c18 + j), 0x00000000);
-+ WREG32((0x2c1c + j), 0x00000000);
-+ WREG32((0x2c20 + j), 0x00000000);
-+ WREG32((0x2c24 + j), 0x00000000);
-+ j += 0x18;
-+ }
-+
-+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
-+
-+ /* setup tiling, simd, pipe config */
-+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
-+
-+ switch (rdev->config.rv770.max_tile_pipes) {
-+ case 1:
-+ gb_tiling_config |= PIPE_TILING(0);
-+ break;
-+ case 2:
-+ gb_tiling_config |= PIPE_TILING(1);
-+ break;
-+ case 4:
-+ gb_tiling_config |= PIPE_TILING(2);
-+ break;
-+ case 8:
-+ gb_tiling_config |= PIPE_TILING(3);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ if (rdev->family == CHIP_RV770)
-+ gb_tiling_config |= BANK_TILING(1);
-+ else
-+ gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
-+
-+ gb_tiling_config |= GROUP_SIZE(0);
-+
-+ if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
-+ gb_tiling_config |= ROW_TILING(3);
-+ gb_tiling_config |= SAMPLE_SPLIT(3);
-+ } else {
-+ gb_tiling_config |=
-+ ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
-+ gb_tiling_config |=
-+ SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
-+ }
-+
-+ gb_tiling_config |= BANK_SWAPS(1);
-+
-+ backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
-+ rdev->config.rv770.max_backends,
-+ (0xff << rdev->config.rv770.max_backends) & 0xff);
-+ gb_tiling_config |= BACKEND_MAP(backend_map);
-+
-+ cc_gc_shader_pipe_config =
-+ INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
-+ cc_gc_shader_pipe_config |=
-+ INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
-+
-+ cc_rb_backend_disable =
-+ BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
-+
-+ WREG32(GB_TILING_CONFIG, gb_tiling_config);
-+ WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
-+ WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
-+
-+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-+
-+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
-+ WREG32(CGTS_TCC_DISABLE, 0);
-+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
-+ WREG32(CGTS_USER_TCC_DISABLE, 0);
-+
-+ num_qd_pipes =
-+ R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK);
-+ WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
-+ WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
-+
-+ /* set HW defaults for 3D engine */
-+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
-+ ROQ_IB2_START(0x2b)));
-+
-+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
-+
-+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
-+ SYNC_GRADIENT |
-+ SYNC_WALKER |
-+ SYNC_ALIGNER));
-+
-+ sx_debug_1 = RREG32(SX_DEBUG_1);
-+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
-+ WREG32(SX_DEBUG_1, sx_debug_1);
-+
-+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
-+ smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
-+ smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
-+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
-+
-+ WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
-+ GS_FLUSH_CTL(4) |
-+ ACK_FLUSH_CTL(3) |
-+ SYNC_FLUSH_CTL));
-+
-+ if (rdev->family == CHIP_RV770)
-+ WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
-+ else {
-+ db_debug4 = RREG32(DB_DEBUG4);
-+ db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
-+ WREG32(DB_DEBUG4, db_debug4);
-+ }
-+
-+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
-+ POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
-+ SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
-+
-+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
-+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
-+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
-+
-+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
-+
-+ WREG32(VGT_NUM_INSTANCES, 1);
-+
-+ WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-+
-+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
-+
-+ WREG32(CP_PERFMON_CNTL, 0);
-+
-+ sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
-+ DONE_FIFO_HIWATER(0xe0) |
-+ ALU_UPDATE_FIFO_HIWATER(0x8));
-+ switch (rdev->family) {
-+ case CHIP_RV770:
-+ sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
-+ break;
-+ case CHIP_RV730:
-+ case CHIP_RV710:
-+ case CHIP_RV740:
-+ default:
-+ sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
-+ break;
-+ }
-+ WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
-+
-+ /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
-+ * should be adjusted as needed by the 2D/3D drivers. This just sets default values
-+ */
-+ sq_config = RREG32(SQ_CONFIG);
-+ sq_config &= ~(PS_PRIO(3) |
-+ VS_PRIO(3) |
-+ GS_PRIO(3) |
-+ ES_PRIO(3));
-+ sq_config |= (DX9_CONSTS |
-+ VC_ENABLE |
-+ EXPORT_SRC_C |
-+ PS_PRIO(0) |
-+ VS_PRIO(1) |
-+ GS_PRIO(2) |
-+ ES_PRIO(3));
-+ if (rdev->family == CHIP_RV710)
-+ /* no vertex cache */
-+ sq_config &= ~VC_ENABLE;
-+
-+ WREG32(SQ_CONFIG, sq_config);
-+
-+ WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
-+ NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
-+ NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
-+
-+ WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
-+ NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
-+
-+ sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
-+ NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
-+ NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
-+ if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
-+ sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
-+ else
-+ sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
-+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
-+
-+ WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
-+ NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
-+
-+ WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
-+ NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
-+
-+ sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
-+ SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
-+ SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
-+ SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
-+
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
-+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
-+
-+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
-+ FORCE_EOV_MAX_REZ_CNT(255)));
-+
-+ if (rdev->family == CHIP_RV710)
-+ WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
-+ AUTO_INVLD_EN(ES_AND_GS_AUTO)));
-+ else
-+ WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
-+ AUTO_INVLD_EN(ES_AND_GS_AUTO)));
-+
-+ switch (rdev->family) {
-+ case CHIP_RV770:
-+ case CHIP_RV730:
-+ case CHIP_RV740:
-+ gs_prim_buffer_depth = 384;
-+ break;
-+ case CHIP_RV710:
-+ gs_prim_buffer_depth = 128;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
-+ vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
-+ /* Max value for this is 256 */
-+ if (vgt_gs_per_es > 256)
-+ vgt_gs_per_es = 256;
-+
-+ WREG32(VGT_ES_PER_GS, 128);
-+ WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
-+ WREG32(VGT_GS_PER_VS, 2);
-+
-+ /* more default values. 2D/3D drivers should adjust as needed */
-+ WREG32(VGT_GS_VERTEX_REUSE, 16);
-+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
-+ WREG32(VGT_STRMOUT_EN, 0);
-+ WREG32(SX_MISC, 0);
-+ WREG32(PA_SC_MODE_CNTL, 0);
-+ WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
-+ WREG32(PA_SC_AA_CONFIG, 0);
-+ WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
-+ WREG32(PA_SC_LINE_STIPPLE, 0);
-+ WREG32(SPI_INPUT_Z, 0);
-+ WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
-+ WREG32(CB_COLOR7_FRAG, 0);
-+
-+ /* clear render buffer base addresses */
-+ WREG32(CB_COLOR0_BASE, 0);
-+ WREG32(CB_COLOR1_BASE, 0);
-+ WREG32(CB_COLOR2_BASE, 0);
-+ WREG32(CB_COLOR3_BASE, 0);
-+ WREG32(CB_COLOR4_BASE, 0);
-+ WREG32(CB_COLOR5_BASE, 0);
-+ WREG32(CB_COLOR6_BASE, 0);
-+ WREG32(CB_COLOR7_BASE, 0);
-+
-+ WREG32(TCP_CNTL, 0);
-+
-+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
-+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
-+
-+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
-+
-+ WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
-+ NUM_CLIP_SEQ(3)));
-+
-+}
-+
-+int rv770_mc_init(struct radeon_device *rdev)
-+{
-+ fixed20_12 a;
-+ u32 tmp;
-+ int r;
-+
-+ /* Get VRAM information */
-+ /* FIXME: Don't know how to determine vram width, need to check
-+ * vram_width usage
-+ */
-+ rdev->mc.vram_width = 128;
-+ rdev->mc.vram_is_ddr = true;
- /* Could aper size report 0 ? */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-+ /* Setup GPU memory space */
-+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
-+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ r = radeon_agp_init(rdev);
-+ if (r)
-+ return r;
-+ /* gtt_size is setup by radeon_agp_init */
-+ rdev->mc.gtt_location = rdev->mc.agp_base;
-+ tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
-+ /* Try to put VRAM before or after AGP because we
-+ * want SYSTEM_APERTURE to cover both VRAM and AGP
-+ * so that the GPU can catch out-of-bounds accesses
-+ */
-+ if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
-+ /* Enough room before */
-+ rdev->mc.vram_location = rdev->mc.gtt_location -
-+ rdev->mc.mc_vram_size;
-+ } else if (tmp > rdev->mc.mc_vram_size) {
-+ /* Enough room after */
-+ rdev->mc.vram_location = rdev->mc.gtt_location +
-+ rdev->mc.gtt_size;
-+ } else {
-+ /* Try to set up VRAM first, then AGP; this
-+ * might not work on some cards
-+ */
-+ rdev->mc.vram_location = 0x00000000UL;
-+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-+ }
-+ } else {
-+ rdev->mc.vram_location = 0x00000000UL;
-+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-+ }
-+ rdev->mc.vram_start = rdev->mc.vram_location;
-+ rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-+ rdev->mc.gtt_start = rdev->mc.gtt_location;
-+ rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
-+ /* FIXME: we should enforce the default clock in case the GPU
-+ * is not in its default setup
-+ */
-+ a.full = rfixed_const(100);
-+ rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-+ rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-+ return 0;
-+}
-+int rv770_gpu_reset(struct radeon_device *rdev)
-+{
-+ /* FIXME: implement */
-+ return 0;
-+}
-+
-+int rv770_resume(struct radeon_device *rdev)
-+{
-+ int r;
-+
-+ rv770_mc_resume(rdev);
-+ r = rv770_pcie_gart_enable(rdev);
-+ if (r)
-+ return r;
-+ rv770_gpu_init(rdev);
-+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
-+ if (r)
-+ return r;
-+ r = rv770_cp_load_microcode(rdev);
-+ if (r)
-+ return r;
-+ r = r600_cp_resume(rdev);
-+ if (r)
-+ return r;
-+ r = r600_wb_init(rdev);
-+ if (r)
-+ return r;
-+ return 0;
-+}
-+
-+int rv770_suspend(struct radeon_device *rdev)
-+{
-+ /* FIXME: we should wait for ring to be empty */
-+ r700_cp_stop(rdev);
-+ return 0;
-+}
-+
-+/* The plan is to move initialization into this function and to use
-+ * helper functions so that radeon_device_init does little more
-+ * than call the ASIC-specific functions. This should also
-+ * allow removing a number of callback functions,
-+ * such as vram_info.
-+ */
-+int rv770_init(struct radeon_device *rdev)
-+{
-+ int r;
-+
-+ rdev->new_init_path = true;
-+ r = radeon_dummy_page_init(rdev);
-+ if (r)
-+ return r;
-+ /* This doesn't do much */
-+ r = radeon_gem_init(rdev);
-+ if (r)
-+ return r;
-+ /* Read BIOS */
-+ if (!radeon_get_bios(rdev)) {
-+ if (ASIC_IS_AVIVO(rdev))
-+ return -EINVAL;
-+ }
-+ /* Must be an ATOMBIOS */
-+ if (!rdev->is_atom_bios)
-+ return -EINVAL;
-+ r = radeon_atombios_init(rdev);
-+ if (r)
-+ return r;
-+ /* Post card if necessary */
-+ if (!r600_card_posted(rdev) && rdev->bios) {
-+ DRM_INFO("GPU not posted. posting now...\n");
-+ atom_asic_init(rdev->mode_info.atom_context);
-+ }
-+ /* Initialize scratch registers */
-+ r600_scratch_init(rdev);
-+ /* Initialize surface registers */
-+ radeon_surface_init(rdev);
-+ r = radeon_clocks_init(rdev);
-+ if (r)
-+ return r;
-+ /* Fence driver */
-+ r = radeon_fence_driver_init(rdev);
-+ if (r)
-+ return r;
-+ r = rv770_mc_init(rdev);
-+ if (r) {
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ /* Retry with AGP disabled */
-+ rv770_fini(rdev);
-+ rdev->flags &= ~RADEON_IS_AGP;
-+ return rv770_init(rdev);
-+ }
-+ return r;
-+ }
-+ /* Memory manager */
-+ r = radeon_object_init(rdev);
-+ if (r)
-+ return r;
-+ rdev->cp.ring_obj = NULL;
-+ r600_ring_init(rdev, 1024 * 1024);
-+
-+ if (!rdev->me_fw || !rdev->pfp_fw) {
-+ r = r600_cp_init_microcode(rdev);
-+ if (r) {
-+ DRM_ERROR("Failed to load firmware!\n");
-+ return r;
-+ }
-+ }
-+
-+ r = rv770_resume(rdev);
-+ if (r) {
-+ if (rdev->flags & RADEON_IS_AGP) {
-+ /* Retry with AGP disabled */
-+ rv770_fini(rdev);
-+ rdev->flags &= ~RADEON_IS_AGP;
-+ return rv770_init(rdev);
-+ }
-+ return r;
-+ }
-+ r = r600_blit_init(rdev);
-+ if (r) {
-+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
-+ return r;
-+ }
-+ r = radeon_ib_pool_init(rdev);
-+ if (r) {
-+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-+ return r;
-+ }
-+ r = radeon_ib_test(rdev);
-+ if (r) {
-+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-+ return r;
-+ }
-+ return 0;
-+}
-+
-+void rv770_fini(struct radeon_device *rdev)
-+{
-+ r600_blit_fini(rdev);
-+ radeon_ring_fini(rdev);
-+ rv770_pcie_gart_disable(rdev);
-+ radeon_gart_table_vram_free(rdev);
-+ radeon_gart_fini(rdev);
-+ radeon_gem_fini(rdev);
-+ radeon_fence_driver_fini(rdev);
-+ radeon_clocks_fini(rdev);
-+#if __OS_HAS_AGP
-+ if (rdev->flags & RADEON_IS_AGP)
-+ radeon_agp_fini(rdev);
-+#endif
-+ radeon_object_fini(rdev);
-+ if (rdev->is_atom_bios) {
-+ radeon_atombios_fini(rdev);
-+ } else {
-+ radeon_combios_fini(rdev);
-+ }
-+ kfree(rdev->bios);
-+ rdev->bios = NULL;
-+ radeon_dummy_page_fini(rdev);
- }
-diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
-new file mode 100644
-index 0000000..4b9c3d6
---- /dev/null
-+++ b/drivers/gpu/drm/radeon/rv770d.h
-@@ -0,0 +1,341 @@
-+/*
-+ * Copyright 2009 Advanced Micro Devices, Inc.
-+ * Copyright 2009 Red Hat Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Dave Airlie
-+ * Alex Deucher
-+ * Jerome Glisse
-+ */
-+#ifndef RV770_H
-+#define RV770_H
-+
-+#define R7XX_MAX_SH_GPRS 256
-+#define R7XX_MAX_TEMP_GPRS 16
-+#define R7XX_MAX_SH_THREADS 256
-+#define R7XX_MAX_SH_STACK_ENTRIES 4096
-+#define R7XX_MAX_BACKENDS 8
-+#define R7XX_MAX_BACKENDS_MASK 0xff
-+#define R7XX_MAX_SIMDS 16
-+#define R7XX_MAX_SIMDS_MASK 0xffff
-+#define R7XX_MAX_PIPES 8
-+#define R7XX_MAX_PIPES_MASK 0xff
-+
-+/* Registers */
-+#define CB_COLOR0_BASE 0x28040
-+#define CB_COLOR1_BASE 0x28044
-+#define CB_COLOR2_BASE 0x28048
-+#define CB_COLOR3_BASE 0x2804C
-+#define CB_COLOR4_BASE 0x28050
-+#define CB_COLOR5_BASE 0x28054
-+#define CB_COLOR6_BASE 0x28058
-+#define CB_COLOR7_BASE 0x2805C
-+#define CB_COLOR7_FRAG 0x280FC
-+
-+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
-+#define CC_RB_BACKEND_DISABLE 0x98F4
-+#define BACKEND_DISABLE(x) ((x) << 16)
-+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
-+
-+#define CGTS_SYS_TCC_DISABLE 0x3F90
-+#define CGTS_TCC_DISABLE 0x9148
-+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
-+#define CGTS_USER_TCC_DISABLE 0x914C
-+
-+#define CONFIG_MEMSIZE 0x5428
-+
-+#define CP_ME_CNTL 0x86D8
-+#define CP_ME_HALT (1<<28)
-+#define CP_PFP_HALT (1<<26)
-+#define CP_ME_RAM_DATA 0xC160
-+#define CP_ME_RAM_RADDR 0xC158
-+#define CP_ME_RAM_WADDR 0xC15C
-+#define CP_MEQ_THRESHOLDS 0x8764
-+#define STQ_SPLIT(x) ((x) << 0)
-+#define CP_PERFMON_CNTL 0x87FC
-+#define CP_PFP_UCODE_ADDR 0xC150
-+#define CP_PFP_UCODE_DATA 0xC154
-+#define CP_QUEUE_THRESHOLDS 0x8760
-+#define ROQ_IB1_START(x) ((x) << 0)
-+#define ROQ_IB2_START(x) ((x) << 8)
-+#define CP_RB_CNTL 0xC104
-+#define RB_BUFSZ(x) ((x)<<0)
-+#define RB_BLKSZ(x) ((x)<<8)
-+#define RB_NO_UPDATE (1<<27)
-+#define RB_RPTR_WR_ENA (1<<31)
-+#define BUF_SWAP_32BIT (2 << 16)
-+#define CP_RB_RPTR 0x8700
-+#define CP_RB_RPTR_ADDR 0xC10C
-+#define CP_RB_RPTR_ADDR_HI 0xC110
-+#define CP_RB_RPTR_WR 0xC108
-+#define CP_RB_WPTR 0xC114
-+#define CP_RB_WPTR_ADDR 0xC118
-+#define CP_RB_WPTR_ADDR_HI 0xC11C
-+#define CP_RB_WPTR_DELAY 0x8704
-+#define CP_SEM_WAIT_TIMER 0x85BC
-+
-+#define DB_DEBUG3 0x98B0
-+#define DB_CLK_OFF_DELAY(x) ((x) << 11)
-+#define DB_DEBUG4 0x9B8C
-+#define DISABLE_TILE_COVERED_FOR_PS_ITER (1 << 6)
-+
-+#define DCP_TILING_CONFIG 0x6CA0
-+#define PIPE_TILING(x) ((x) << 1)
-+#define BANK_TILING(x) ((x) << 4)
-+#define GROUP_SIZE(x) ((x) << 6)
-+#define ROW_TILING(x) ((x) << 8)
-+#define BANK_SWAPS(x) ((x) << 11)
-+#define SAMPLE_SPLIT(x) ((x) << 14)
-+#define BACKEND_MAP(x) ((x) << 16)
-+
-+#define GB_TILING_CONFIG 0x98F0
-+
-+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
-+#define INACTIVE_QD_PIPES(x) ((x) << 8)
-+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
-+#define INACTIVE_SIMDS(x) ((x) << 16)
-+#define INACTIVE_SIMDS_MASK 0x00FF0000
-+
-+#define GRBM_CNTL 0x8000
-+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
-+#define GRBM_SOFT_RESET 0x8020
-+#define SOFT_RESET_CP (1<<0)
-+#define GRBM_STATUS 0x8010
-+#define CMDFIFO_AVAIL_MASK 0x0000000F
-+#define GUI_ACTIVE (1<<31)
-+#define GRBM_STATUS2 0x8014
-+
-+#define HDP_HOST_PATH_CNTL 0x2C00
-+#define HDP_NONSURFACE_BASE 0x2C04
-+#define HDP_NONSURFACE_INFO 0x2C08
-+#define HDP_NONSURFACE_SIZE 0x2C0C
-+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
-+#define HDP_TILING_CONFIG 0x2F3C
-+
-+#define MC_ARB_RAMCFG 0x2760
-+#define NOOFBANK_SHIFT 0
-+#define NOOFBANK_MASK 0x00000003
-+#define NOOFRANK_SHIFT 2
-+#define NOOFRANK_MASK 0x00000004
-+#define NOOFROWS_SHIFT 3
-+#define NOOFROWS_MASK 0x00000038
-+#define NOOFCOLS_SHIFT 6
-+#define NOOFCOLS_MASK 0x000000C0
-+#define CHANSIZE_SHIFT 8
-+#define CHANSIZE_MASK 0x00000100
-+#define BURSTLENGTH_SHIFT 9
-+#define BURSTLENGTH_MASK 0x00000200
-+#define MC_VM_AGP_TOP 0x2028
-+#define MC_VM_AGP_BOT 0x202C
-+#define MC_VM_AGP_BASE 0x2030
-+#define MC_VM_FB_LOCATION 0x2024
-+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
-+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
-+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
-+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
-+#define ENABLE_L1_TLB (1 << 0)
-+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
-+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
-+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
-+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
-+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
-+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
-+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
-+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
-+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
-+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
-+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
-+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
-+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
-+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
-+
-+#define PA_CL_ENHANCE 0x8A14
-+#define CLIP_VTX_REORDER_ENA (1 << 0)
-+#define NUM_CLIP_SEQ(x) ((x) << 1)
-+#define PA_SC_AA_CONFIG 0x28C04
-+#define PA_SC_CLIPRECT_RULE 0x2820C
-+#define PA_SC_EDGERULE 0x28230
-+#define PA_SC_FIFO_SIZE 0x8BCC
-+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
-+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
-+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
-+#define FORCE_EOV_MAX_CLK_CNT(x) ((x)<<0)
-+#define FORCE_EOV_MAX_REZ_CNT(x) ((x)<<16)
-+#define PA_SC_LINE_STIPPLE 0x28A0C
-+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
-+#define PA_SC_MODE_CNTL 0x28A4C
-+#define PA_SC_MULTI_CHIP_CNTL 0x8B20
-+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
-+
-+#define SCRATCH_REG0 0x8500
-+#define SCRATCH_REG1 0x8504
-+#define SCRATCH_REG2 0x8508
-+#define SCRATCH_REG3 0x850C
-+#define SCRATCH_REG4 0x8510
-+#define SCRATCH_REG5 0x8514
-+#define SCRATCH_REG6 0x8518
-+#define SCRATCH_REG7 0x851C
-+#define SCRATCH_UMSK 0x8540
-+#define SCRATCH_ADDR 0x8544
-+
-+#define SMX_DC_CTL0 0xA020
-+#define USE_HASH_FUNCTION (1 << 0)
-+#define CACHE_DEPTH(x) ((x) << 1)
-+#define FLUSH_ALL_ON_EVENT (1 << 10)
-+#define STALL_ON_EVENT (1 << 11)
-+#define SMX_EVENT_CTL 0xA02C
-+#define ES_FLUSH_CTL(x) ((x) << 0)
-+#define GS_FLUSH_CTL(x) ((x) << 3)
-+#define ACK_FLUSH_CTL(x) ((x) << 6)
-+#define SYNC_FLUSH_CTL (1 << 8)
-+
-+#define SPI_CONFIG_CNTL 0x9100
-+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
-+#define DISABLE_INTERP_1 (1 << 5)
-+#define SPI_CONFIG_CNTL_1 0x913C
-+#define VTX_DONE_DELAY(x) ((x) << 0)
-+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-+#define SPI_INPUT_Z 0x286D8
-+#define SPI_PS_IN_CONTROL_0 0x286CC
-+#define NUM_INTERP(x) ((x)<<0)
-+#define POSITION_ENA (1<<8)
-+#define POSITION_CENTROID (1<<9)
-+#define POSITION_ADDR(x) ((x)<<10)
-+#define PARAM_GEN(x) ((x)<<15)
-+#define PARAM_GEN_ADDR(x) ((x)<<19)
-+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
-+#define PERSP_GRADIENT_ENA (1<<28)
-+#define LINEAR_GRADIENT_ENA (1<<29)
-+#define POSITION_SAMPLE (1<<30)
-+#define BARYC_AT_SAMPLE_ENA (1<<31)
-+
-+#define SQ_CONFIG 0x8C00
-+#define VC_ENABLE (1 << 0)
-+#define EXPORT_SRC_C (1 << 1)
-+#define DX9_CONSTS (1 << 2)
-+#define ALU_INST_PREFER_VECTOR (1 << 3)
-+#define DX10_CLAMP (1 << 4)
-+#define CLAUSE_SEQ_PRIO(x) ((x) << 8)
-+#define PS_PRIO(x) ((x) << 24)
-+#define VS_PRIO(x) ((x) << 26)
-+#define GS_PRIO(x) ((x) << 28)
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_0 0x8DB0
-+#define SIMDA_RING0(x) ((x)<<0)
-+#define SIMDA_RING1(x) ((x)<<8)
-+#define SIMDB_RING0(x) ((x)<<16)
-+#define SIMDB_RING1(x) ((x)<<24)
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_1 0x8DB4
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_2 0x8DB8
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_3 0x8DBC
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_4 0x8DC0
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_5 0x8DC4
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_6 0x8DC8
-+#define SQ_DYN_GPR_SIZE_SIMD_AB_7 0x8DCC
-+#define ES_PRIO(x) ((x) << 30)
-+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
-+#define NUM_PS_GPRS(x) ((x) << 0)
-+#define NUM_VS_GPRS(x) ((x) << 16)
-+#define DYN_GPR_ENABLE (1 << 27)
-+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
-+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
-+#define NUM_GS_GPRS(x) ((x) << 0)
-+#define NUM_ES_GPRS(x) ((x) << 16)
-+#define SQ_MS_FIFO_SIZES 0x8CF0
-+#define CACHE_FIFO_SIZE(x) ((x) << 0)
-+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
-+#define DONE_FIFO_HIWATER(x) ((x) << 16)
-+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
-+#define SQ_STACK_RESOURCE_MGMT_1 0x8C10
-+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
-+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
-+#define SQ_STACK_RESOURCE_MGMT_2 0x8C14
-+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
-+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
-+#define SQ_THREAD_RESOURCE_MGMT 0x8C0C
-+#define NUM_PS_THREADS(x) ((x) << 0)
-+#define NUM_VS_THREADS(x) ((x) << 8)
-+#define NUM_GS_THREADS(x) ((x) << 16)
-+#define NUM_ES_THREADS(x) ((x) << 24)
-+
-+#define SX_DEBUG_1 0x9058
-+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
-+#define SX_EXPORT_BUFFER_SIZES 0x900C
-+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
-+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
-+#define SMX_BUFFER_SIZE(x) ((x) << 16)
-+#define SX_MISC 0x28350
-+
-+#define TA_CNTL_AUX 0x9508
-+#define DISABLE_CUBE_WRAP (1 << 0)
-+#define DISABLE_CUBE_ANISO (1 << 1)
-+#define SYNC_GRADIENT (1 << 24)
-+#define SYNC_WALKER (1 << 25)
-+#define SYNC_ALIGNER (1 << 26)
-+#define BILINEAR_PRECISION_6_BIT (0 << 31)
-+#define BILINEAR_PRECISION_8_BIT (1 << 31)
-+
-+#define TCP_CNTL 0x9610
-+
-+#define VGT_CACHE_INVALIDATION 0x88C4
-+#define CACHE_INVALIDATION(x) ((x)<<0)
-+#define VC_ONLY 0
-+#define TC_ONLY 1
-+#define VC_AND_TC 2
-+#define AUTO_INVLD_EN(x) ((x) << 6)
-+#define NO_AUTO 0
-+#define ES_AUTO 1
-+#define GS_AUTO 2
-+#define ES_AND_GS_AUTO 3
-+#define VGT_ES_PER_GS 0x88CC
-+#define VGT_GS_PER_ES 0x88C8
-+#define VGT_GS_PER_VS 0x88E8
-+#define VGT_GS_VERTEX_REUSE 0x88D4
-+#define VGT_NUM_INSTANCES 0x8974
-+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
-+#define DEALLOC_DIST_MASK 0x0000007F
-+#define VGT_STRMOUT_EN 0x28AB0
-+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
-+#define VTX_REUSE_DEPTH_MASK 0x000000FF
-+
-+#define VM_CONTEXT0_CNTL 0x1410
-+#define ENABLE_CONTEXT (1 << 0)
-+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
-+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
-+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
-+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
-+#define VM_L2_CNTL 0x1400
-+#define ENABLE_L2_CACHE (1 << 0)
-+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
-+#define VM_L2_CNTL2 0x1404
-+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-+#define INVALIDATE_L2_CACHE (1 << 1)
-+#define VM_L2_CNTL3 0x1408
-+#define BANK_SELECT(x) ((x) << 0)
-+#define CACHE_UPDATE_MODE(x) ((x) << 6)
-+#define VM_L2_STATUS 0x140C
-+#define L2_BUSY (1 << 0)
-+
-+#define WAIT_UNTIL 0x8040
-+
-+#endif
-diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
-index c2b0d71..87c0625 100644
---- a/drivers/gpu/drm/ttm/ttm_bo.c
-+++ b/drivers/gpu/drm/ttm/ttm_bo.c
-@@ -44,6 +44,39 @@
-
- static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
- static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
-+static void ttm_bo_global_kobj_release(struct kobject *kobj);
-+
-+static struct attribute ttm_bo_count = {
-+ .name = "bo_count",
-+ .mode = S_IRUGO
-+};
-+
-+static ssize_t ttm_bo_global_show(struct kobject *kobj,
-+ struct attribute *attr,
-+ char *buffer)
-+{
-+ struct ttm_bo_global *glob =
-+ container_of(kobj, struct ttm_bo_global, kobj);
-+
-+ return snprintf(buffer, PAGE_SIZE, "%lu\n",
-+ (unsigned long) atomic_read(&glob->bo_count));
-+}
-+
-+static struct attribute *ttm_bo_global_attrs[] = {
-+ &ttm_bo_count,
-+ NULL
-+};
-+
-+static struct sysfs_ops ttm_bo_global_ops = {
-+ .show = &ttm_bo_global_show
-+};
-+
-+static struct kobj_type ttm_bo_glob_kobj_type = {
-+ .release = &ttm_bo_global_kobj_release,
-+ .sysfs_ops = &ttm_bo_global_ops,
-+ .default_attrs = ttm_bo_global_attrs
-+};
-+
-
- static inline uint32_t ttm_bo_type_flags(unsigned type)
- {
-@@ -66,10 +99,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
-
- if (bo->ttm)
- ttm_tt_destroy(bo->ttm);
-+ atomic_dec(&bo->glob->bo_count);
- if (bo->destroy)
- bo->destroy(bo);
- else {
-- ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
-+ ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
- kfree(bo);
- }
- }
-@@ -106,7 +140,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
- kref_get(&bo->list_kref);
-
- if (bo->ttm != NULL) {
-- list_add_tail(&bo->swap, &bdev->swap_lru);
-+ list_add_tail(&bo->swap, &bo->glob->swap_lru);
- kref_get(&bo->list_kref);
- }
- }
-@@ -141,7 +175,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence, uint32_t sequence)
- {
-- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
- int ret;
-
- while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
-@@ -153,9 +187,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
- if (no_wait)
- return -EBUSY;
-
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- ret = ttm_bo_wait_unreserved(bo, interruptible);
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
-
- if (unlikely(ret))
- return ret;
-@@ -181,16 +215,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence, uint32_t sequence)
- {
-- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
- int put_count = 0;
- int ret;
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
- sequence);
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(bo);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
-
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
-@@ -200,13 +234,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
-
- void ttm_bo_unreserve(struct ttm_buffer_object *bo)
- {
-- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- }
- EXPORT_SYMBOL(ttm_bo_unreserve);
-
-@@ -217,6 +251,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
- static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
- {
- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
- int ret = 0;
- uint32_t page_flags = 0;
-
-@@ -232,14 +267,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
- case ttm_bo_type_kernel:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-- page_flags, bdev->dummy_read_page);
-+ page_flags, glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
- ret = -ENOMEM;
- break;
- case ttm_bo_type_user:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags | TTM_PAGE_FLAG_USER,
-- bdev->dummy_read_page);
-+ glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
- ret = -ENOMEM;
- break;
-@@ -360,6 +395,7 @@ out_err:
- static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
- {
- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver = bdev->driver;
- int ret;
-
-@@ -371,7 +407,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
-
- spin_unlock(&bo->lock);
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
- BUG_ON(ret);
- if (bo->ttm)
-@@ -386,7 +422,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
- bo->mem.mm_node = NULL;
- }
- put_count = ttm_bo_del_from_lru(bo);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
-
- atomic_set(&bo->reserved, 0);
-
-@@ -396,14 +432,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
- return 0;
- }
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- if (list_empty(&bo->ddestroy)) {
- void *sync_obj = bo->sync_obj;
- void *sync_obj_arg = bo->sync_obj_arg;
-
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
-
- if (sync_obj)
-@@ -413,7 +449,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
- ret = 0;
-
- } else {
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
- ret = -EBUSY;
- }
-@@ -428,11 +464,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
-
- static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
- {
-+ struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry, *nentry;
- struct list_head *list, *next;
- int ret;
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- list_for_each_safe(list, next, &bdev->ddestroy) {
- entry = list_entry(list, struct ttm_buffer_object, ddestroy);
- nentry = NULL;
-@@ -449,16 +486,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
- }
- kref_get(&entry->list_kref);
-
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(entry, remove_all);
- kref_put(&entry->list_kref, ttm_bo_release_list);
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- if (nentry) {
- bool next_onlist = !list_empty(next);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- kref_put(&nentry->list_kref, ttm_bo_release_list);
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- /*
- * Someone might have raced us and removed the
- * next entry from the list. We don't bother restarting
-@@ -472,7 +509,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
- break;
- }
- ret = !list_empty(&bdev->ddestroy);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
-
- return ret;
- }
-@@ -522,6 +559,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
- {
- int ret = 0;
- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
- struct ttm_mem_reg evict_mem;
- uint32_t proposed_placement;
-
-@@ -570,12 +608,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
- goto out;
- }
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- bo->evicted = true;
- out:
- return ret;
-@@ -590,6 +628,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
- uint32_t mem_type,
- bool interruptible, bool no_wait)
- {
-+ struct ttm_bo_global *glob = bdev->glob;
- struct drm_mm_node *node;
- struct ttm_buffer_object *entry;
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-@@ -603,7 +642,7 @@ retry_pre_get:
- if (unlikely(ret != 0))
- return ret;
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- do {
- node = drm_mm_search_free(&man->manager, num_pages,
- mem->page_alignment, 1);
-@@ -624,7 +663,7 @@ retry_pre_get:
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(entry);
-
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
-
- if (unlikely(ret != 0))
- return ret;
-@@ -640,21 +679,21 @@ retry_pre_get:
- if (ret)
- return ret;
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- } while (1);
-
- if (!node) {
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- return -ENOMEM;
- }
-
- node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
- if (unlikely(!node)) {
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- goto retry_pre_get;
- }
-
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- mem->mm_node = node;
- mem->mem_type = mem_type;
- return 0;
-@@ -723,6 +762,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait)
- {
- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
- struct ttm_mem_type_manager *man;
-
- uint32_t num_prios = bdev->driver->num_mem_type_prio;
-@@ -762,20 +802,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- if (unlikely(ret))
- return ret;
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- node = drm_mm_search_free(&man->manager,
- mem->num_pages,
- mem->page_alignment,
- 1);
- if (unlikely(!node)) {
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- break;
- }
- node = drm_mm_get_block_atomic(node,
- mem->num_pages,
- mem->
- page_alignment);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- } while (!node);
- }
- if (node)
-@@ -848,7 +888,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
- {
-- struct ttm_bo_device *bdev = bo->bdev;
-+ struct ttm_bo_global *glob = bo->glob;
- int ret = 0;
- struct ttm_mem_reg mem;
-
-@@ -884,9 +924,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
-
- out_unlock:
- if (ret && mem.mm_node) {
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- drm_mm_put_block(mem.mm_node);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- }
- return ret;
- }
-@@ -1022,6 +1062,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
- INIT_LIST_HEAD(&bo->ddestroy);
- INIT_LIST_HEAD(&bo->swap);
- bo->bdev = bdev;
-+ bo->glob = bdev->glob;
- bo->type = type;
- bo->num_pages = num_pages;
- bo->mem.mem_type = TTM_PL_SYSTEM;
-@@ -1034,6 +1075,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
- bo->seq_valid = false;
- bo->persistant_swap_storage = persistant_swap_storage;
- bo->acc_size = acc_size;
-+ atomic_inc(&bo->glob->bo_count);
-
- ret = ttm_bo_check_placement(bo, flags, 0ULL);
- if (unlikely(ret != 0))
-@@ -1072,13 +1114,13 @@ out_err:
- }
- EXPORT_SYMBOL(ttm_buffer_object_init);
-
--static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
-+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
- {
- size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
- PAGE_MASK;
-
-- return bdev->ttm_bo_size + 2 * page_array_size;
-+ return glob->ttm_bo_size + 2 * page_array_size;
- }
-
- int ttm_buffer_object_create(struct ttm_bo_device *bdev,
-@@ -1093,18 +1135,18 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
- {
- struct ttm_buffer_object *bo;
- int ret;
-- struct ttm_mem_global *mem_glob = bdev->mem_glob;
-+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
-
- size_t acc_size =
-- ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
-+ ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0))
- return ret;
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-
- if (unlikely(bo == NULL)) {
-- ttm_mem_global_free(mem_glob, acc_size, false);
-+ ttm_mem_global_free(mem_glob, acc_size);
- return -ENOMEM;
- }
-
-@@ -1150,6 +1192,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- struct list_head *head,
- unsigned mem_type, bool allow_errors)
- {
-+ struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry;
- int ret;
- int put_count;
-@@ -1158,30 +1201,31 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- * Can't use standard list traversal since we're unlocking.
- */
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
-
- while (!list_empty(head)) {
- entry = list_first_entry(head, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
- ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
- put_count = ttm_bo_del_from_lru(entry);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
- BUG_ON(ret);
- ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
- ttm_bo_unreserve(entry);
- kref_put(&entry->list_kref, ttm_bo_release_list);
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- }
-
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
-
- return 0;
- }
-
- int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
- {
-+ struct ttm_bo_global *glob = bdev->glob;
- struct ttm_mem_type_manager *man;
- int ret = -EINVAL;
-
-@@ -1204,13 +1248,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
- if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- if (drm_mm_clean(&man->manager))
- drm_mm_takedown(&man->manager);
- else
- ret = -EBUSY;
-
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- }
-
- return ret;
-@@ -1284,11 +1328,82 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- }
- EXPORT_SYMBOL(ttm_bo_init_mm);
-
-+static void ttm_bo_global_kobj_release(struct kobject *kobj)
-+{
-+ struct ttm_bo_global *glob =
-+ container_of(kobj, struct ttm_bo_global, kobj);
-+
-+ ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
-+ __free_page(glob->dummy_read_page);
-+ kfree(glob);
-+}
-+
-+void ttm_bo_global_release(struct ttm_global_reference *ref)
-+{
-+ struct ttm_bo_global *glob = ref->object;
-+
-+ kobject_del(&glob->kobj);
-+ kobject_put(&glob->kobj);
-+}
-+EXPORT_SYMBOL(ttm_bo_global_release);
-+
-+int ttm_bo_global_init(struct ttm_global_reference *ref)
-+{
-+ struct ttm_bo_global_ref *bo_ref =
-+ container_of(ref, struct ttm_bo_global_ref, ref);
-+ struct ttm_bo_global *glob = ref->object;
-+ int ret;
-+
-+ mutex_init(&glob->device_list_mutex);
-+ spin_lock_init(&glob->lru_lock);
-+ glob->mem_glob = bo_ref->mem_glob;
-+ glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-+
-+ if (unlikely(glob->dummy_read_page == NULL)) {
-+ ret = -ENOMEM;
-+ goto out_no_drp;
-+ }
-+
-+ INIT_LIST_HEAD(&glob->swap_lru);
-+ INIT_LIST_HEAD(&glob->device_list);
-+
-+ ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
-+ ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
-+ if (unlikely(ret != 0)) {
-+ printk(KERN_ERR TTM_PFX
-+ "Could not register buffer object swapout.\n");
-+ goto out_no_shrink;
-+ }
-+
-+ glob->ttm_bo_extra_size =
-+ ttm_round_pot(sizeof(struct ttm_tt)) +
-+ ttm_round_pot(sizeof(struct ttm_backend));
-+
-+ glob->ttm_bo_size = glob->ttm_bo_extra_size +
-+ ttm_round_pot(sizeof(struct ttm_buffer_object));
-+
-+ atomic_set(&glob->bo_count, 0);
-+
-+ kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
-+ ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
-+ if (unlikely(ret != 0))
-+ kobject_put(&glob->kobj);
-+ return ret;
-+out_no_shrink:
-+ __free_page(glob->dummy_read_page);
-+out_no_drp:
-+ kfree(glob);
-+ return ret;
-+}
-+EXPORT_SYMBOL(ttm_bo_global_init);
-+
-+
- int ttm_bo_device_release(struct ttm_bo_device *bdev)
- {
- int ret = 0;
- unsigned i = TTM_NUM_MEM_TYPES;
- struct ttm_mem_type_manager *man;
-+ struct ttm_bo_global *glob = bdev->glob;
-
- while (i--) {
- man = &bdev->man[i];
-@@ -1304,100 +1419,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
- }
- }
-
-+ mutex_lock(&glob->device_list_mutex);
-+ list_del(&bdev->device_list);
-+ mutex_unlock(&glob->device_list_mutex);
-+
- if (!cancel_delayed_work(&bdev->wq))
- flush_scheduled_work();
-
- while (ttm_bo_delayed_delete(bdev, true))
- ;
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- if (list_empty(&bdev->ddestroy))
- TTM_DEBUG("Delayed destroy list was clean\n");
-
- if (list_empty(&bdev->man[0].lru))
- TTM_DEBUG("Swap list was clean\n");
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
-
-- ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
- BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
- write_lock(&bdev->vm_lock);
- drm_mm_takedown(&bdev->addr_space_mm);
- write_unlock(&bdev->vm_lock);
-
-- __free_page(bdev->dummy_read_page);
- return ret;
- }
- EXPORT_SYMBOL(ttm_bo_device_release);
-
--/*
-- * This function is intended to be called on drm driver load.
-- * If you decide to call it from firstopen, you must protect the call
-- * from a potentially racing ttm_bo_driver_finish in lastclose.
-- * (This may happen on X server restart).
-- */
--
- int ttm_bo_device_init(struct ttm_bo_device *bdev,
-- struct ttm_mem_global *mem_glob,
-- struct ttm_bo_driver *driver, uint64_t file_page_offset,
-+ struct ttm_bo_global *glob,
-+ struct ttm_bo_driver *driver,
-+ uint64_t file_page_offset,
- bool need_dma32)
- {
- int ret = -EINVAL;
-
-- bdev->dummy_read_page = NULL;
- rwlock_init(&bdev->vm_lock);
-- spin_lock_init(&bdev->lru_lock);
--
- bdev->driver = driver;
-- bdev->mem_glob = mem_glob;
-
- memset(bdev->man, 0, sizeof(bdev->man));
-
-- bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-- if (unlikely(bdev->dummy_read_page == NULL)) {
-- ret = -ENOMEM;
-- goto out_err0;
-- }
--
- /*
- * Initialize the system memory buffer type.
- * Other types need to be driver / IOCTL initialized.
- */
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
- if (unlikely(ret != 0))
-- goto out_err1;
-+ goto out_no_sys;
-
- bdev->addr_space_rb = RB_ROOT;
- ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
- if (unlikely(ret != 0))
-- goto out_err2;
-+ goto out_no_addr_mm;
-
- INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
- bdev->nice_mode = true;
- INIT_LIST_HEAD(&bdev->ddestroy);
-- INIT_LIST_HEAD(&bdev->swap_lru);
- bdev->dev_mapping = NULL;
-+ bdev->glob = glob;
- bdev->need_dma32 = need_dma32;
-- ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
-- ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
-- if (unlikely(ret != 0)) {
-- printk(KERN_ERR TTM_PFX
-- "Could not register buffer object swapout.\n");
-- goto out_err2;
-- }
-
-- bdev->ttm_bo_extra_size =
-- ttm_round_pot(sizeof(struct ttm_tt)) +
-- ttm_round_pot(sizeof(struct ttm_backend));
--
-- bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
-- ttm_round_pot(sizeof(struct ttm_buffer_object));
-+ mutex_lock(&glob->device_list_mutex);
-+ list_add_tail(&bdev->device_list, &glob->device_list);
-+ mutex_unlock(&glob->device_list_mutex);
-
- return 0;
--out_err2:
-+out_no_addr_mm:
- ttm_bo_clean_mm(bdev, 0);
--out_err1:
-- __free_page(bdev->dummy_read_page);
--out_err0:
-+out_no_sys:
- return ret;
- }
- EXPORT_SYMBOL(ttm_bo_device_init);
-@@ -1647,21 +1736,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-
- static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
- {
-- struct ttm_bo_device *bdev =
-- container_of(shrink, struct ttm_bo_device, shrink);
-+ struct ttm_bo_global *glob =
-+ container_of(shrink, struct ttm_bo_global, shrink);
- struct ttm_buffer_object *bo;
- int ret = -EBUSY;
- int put_count;
- uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
-
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- while (ret == -EBUSY) {
-- if (unlikely(list_empty(&bdev->swap_lru))) {
-- spin_unlock(&bdev->lru_lock);
-+ if (unlikely(list_empty(&glob->swap_lru))) {
-+ spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
-- bo = list_first_entry(&bdev->swap_lru,
-+ bo = list_first_entry(&glob->swap_lru,
- struct ttm_buffer_object, swap);
- kref_get(&bo->list_kref);
-
-@@ -1673,16 +1762,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
- ttm_bo_wait_unreserved(bo, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
-- spin_lock(&bdev->lru_lock);
-+ spin_lock(&glob->lru_lock);
- }
- }
-
- BUG_ON(ret != 0);
- put_count = ttm_bo_del_from_lru(bo);
-- spin_unlock(&bdev->lru_lock);
-+ spin_unlock(&glob->lru_lock);
-
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
-@@ -1736,6 +1825,6 @@ out:
-
- void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
- {
-- while (ttm_bo_swapout(&bdev->shrink) == 0)
-+ while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
- ;
- }
-diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
-index ad4ada0..c70927e 100644
---- a/drivers/gpu/drm/ttm/ttm_bo_util.c
-+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
-@@ -41,9 +41,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- if (old_mem->mm_node) {
-- spin_lock(&bo->bdev->lru_lock);
-+ spin_lock(&bo->glob->lru_lock);
- drm_mm_put_block(old_mem->mm_node);
-- spin_unlock(&bo->bdev->lru_lock);
-+ spin_unlock(&bo->glob->lru_lock);
- }
- old_mem->mm_node = NULL;
- }
-diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
-index 0b14eb1..541744d 100644
---- a/drivers/gpu/drm/ttm/ttm_global.c
-+++ b/drivers/gpu/drm/ttm/ttm_global.c
-@@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
-
- mutex_lock(&item->mutex);
- if (item->refcount == 0) {
-- item->object = kmalloc(ref->size, GFP_KERNEL);
-+ item->object = kzalloc(ref->size, GFP_KERNEL);
- if (unlikely(item->object == NULL)) {
- ret = -ENOMEM;
- goto out_err;
-@@ -89,7 +89,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
- mutex_unlock(&item->mutex);
- return 0;
- out_err:
-- kfree(item->object);
- mutex_unlock(&item->mutex);
- item->object = NULL;
- return ret;
-@@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
- BUG_ON(ref->object != item->object);
- if (--item->refcount == 0) {
- ref->release(ref);
-- kfree(item->object);
- item->object = NULL;
- }
- mutex_unlock(&item->mutex);
-diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
-index 87323d4..072c281 100644
---- a/drivers/gpu/drm/ttm/ttm_memory.c
-+++ b/drivers/gpu/drm/ttm/ttm_memory.c
-@@ -26,15 +26,180 @@
- **************************************************************************/
-
- #include "ttm/ttm_memory.h"
-+#include "ttm/ttm_module.h"
- #include <linux/spinlock.h>
- #include <linux/sched.h>
- #include <linux/wait.h>
- #include <linux/mm.h>
- #include <linux/module.h>
-
--#define TTM_PFX "[TTM] "
- #define TTM_MEMORY_ALLOC_RETRIES 4
-
-+struct ttm_mem_zone {
-+ struct kobject kobj;
-+ struct ttm_mem_global *glob;
-+ const char *name;
-+ uint64_t zone_mem;
-+ uint64_t emer_mem;
-+ uint64_t max_mem;
-+ uint64_t swap_limit;
-+ uint64_t used_mem;
-+};
-+
-+static struct attribute ttm_mem_sys = {
-+ .name = "zone_memory",
-+ .mode = S_IRUGO
-+};
-+static struct attribute ttm_mem_emer = {
-+ .name = "emergency_memory",
-+ .mode = S_IRUGO | S_IWUSR
-+};
-+static struct attribute ttm_mem_max = {
-+ .name = "available_memory",
-+ .mode = S_IRUGO | S_IWUSR
-+};
-+static struct attribute ttm_mem_swap = {
-+ .name = "swap_limit",
-+ .mode = S_IRUGO | S_IWUSR
-+};
-+static struct attribute ttm_mem_used = {
-+ .name = "used_memory",
-+ .mode = S_IRUGO
-+};
-+
-+static void ttm_mem_zone_kobj_release(struct kobject *kobj)
-+{
-+ struct ttm_mem_zone *zone =
-+ container_of(kobj, struct ttm_mem_zone, kobj);
-+
-+ printk(KERN_INFO TTM_PFX
-+ "Zone %7s: Used memory at exit: %llu kiB.\n",
-+ zone->name, (unsigned long long) zone->used_mem >> 10);
-+ kfree(zone);
-+}
-+
-+static ssize_t ttm_mem_zone_show(struct kobject *kobj,
-+ struct attribute *attr,
-+ char *buffer)
-+{
-+ struct ttm_mem_zone *zone =
-+ container_of(kobj, struct ttm_mem_zone, kobj);
-+ uint64_t val = 0;
-+
-+ spin_lock(&zone->glob->lock);
-+ if (attr == &ttm_mem_sys)
-+ val = zone->zone_mem;
-+ else if (attr == &ttm_mem_emer)
-+ val = zone->emer_mem;
-+ else if (attr == &ttm_mem_max)
-+ val = zone->max_mem;
-+ else if (attr == &ttm_mem_swap)
-+ val = zone->swap_limit;
-+ else if (attr == &ttm_mem_used)
-+ val = zone->used_mem;
-+ spin_unlock(&zone->glob->lock);
-+
-+ return snprintf(buffer, PAGE_SIZE, "%llu\n",
-+ (unsigned long long) val >> 10);
-+}
-+
-+static void ttm_check_swapping(struct ttm_mem_global *glob);
-+
-+static ssize_t ttm_mem_zone_store(struct kobject *kobj,
-+ struct attribute *attr,
-+ const char *buffer,
-+ size_t size)
-+{
-+ struct ttm_mem_zone *zone =
-+ container_of(kobj, struct ttm_mem_zone, kobj);
-+ int chars;
-+ unsigned long val;
-+ uint64_t val64;
-+
-+ chars = sscanf(buffer, "%lu", &val);
-+ if (chars == 0)
-+ return size;
-+
-+ val64 = val;
-+ val64 <<= 10;
-+
-+ spin_lock(&zone->glob->lock);
-+ if (val64 > zone->zone_mem)
-+ val64 = zone->zone_mem;
-+ if (attr == &ttm_mem_emer) {
-+ zone->emer_mem = val64;
-+ if (zone->max_mem > val64)
-+ zone->max_mem = val64;
-+ } else if (attr == &ttm_mem_max) {
-+ zone->max_mem = val64;
-+ if (zone->emer_mem < val64)
-+ zone->emer_mem = val64;
-+ } else if (attr == &ttm_mem_swap)
-+ zone->swap_limit = val64;
-+ spin_unlock(&zone->glob->lock);
-+
-+ ttm_check_swapping(zone->glob);
-+
-+ return size;
-+}
-+
-+static struct attribute *ttm_mem_zone_attrs[] = {
-+ &ttm_mem_sys,
-+ &ttm_mem_emer,
-+ &ttm_mem_max,
-+ &ttm_mem_swap,
-+ &ttm_mem_used,
-+ NULL
-+};
-+
-+static struct sysfs_ops ttm_mem_zone_ops = {
-+ .show = &ttm_mem_zone_show,
-+ .store = &ttm_mem_zone_store
-+};
-+
-+static struct kobj_type ttm_mem_zone_kobj_type = {
-+ .release = &ttm_mem_zone_kobj_release,
-+ .sysfs_ops = &ttm_mem_zone_ops,
-+ .default_attrs = ttm_mem_zone_attrs,
-+};
-+
-+static void ttm_mem_global_kobj_release(struct kobject *kobj)
-+{
-+ struct ttm_mem_global *glob =
-+ container_of(kobj, struct ttm_mem_global, kobj);
-+
-+ kfree(glob);
-+}
-+
-+static struct kobj_type ttm_mem_glob_kobj_type = {
-+ .release = &ttm_mem_global_kobj_release,
-+};
-+
-+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
-+ bool from_wq, uint64_t extra)
-+{
-+ unsigned int i;
-+ struct ttm_mem_zone *zone;
-+ uint64_t target;
-+
-+ for (i = 0; i < glob->num_zones; ++i) {
-+ zone = glob->zones[i];
-+
-+ if (from_wq)
-+ target = zone->swap_limit;
-+ else if (capable(CAP_SYS_ADMIN))
-+ target = zone->emer_mem;
-+ else
-+ target = zone->max_mem;
-+
-+ target = (extra > target) ? 0ULL : target;
-+
-+ if (zone->used_mem > target)
-+ return true;
-+ }
-+ return false;
-+}
-+
- /**
- * At this point we only support a single shrink callback.
- * Extend this if needed, perhaps using a linked list of callbacks.
-@@ -42,34 +207,17 @@
- * many threads may try to swap out at any given time.
- */
-
--static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
-+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
- uint64_t extra)
- {
- int ret;
- struct ttm_mem_shrink *shrink;
-- uint64_t target;
-- uint64_t total_target;
-
- spin_lock(&glob->lock);
- if (glob->shrink == NULL)
- goto out;
-
-- if (from_workqueue) {
-- target = glob->swap_limit;
-- total_target = glob->total_memory_swap_limit;
-- } else if (capable(CAP_SYS_ADMIN)) {
-- total_target = glob->emer_total_memory;
-- target = glob->emer_memory;
-- } else {
-- total_target = glob->max_total_memory;
-- target = glob->max_memory;
-- }
--
-- total_target = (extra >= total_target) ? 0 : total_target - extra;
-- target = (extra >= target) ? 0 : target - extra;
--
-- while (glob->used_memory > target ||
-- glob->used_total_memory > total_target) {
-+ while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
- shrink = glob->shrink;
- spin_unlock(&glob->lock);
- ret = shrink->do_shrink(shrink);
-@@ -81,6 +229,8 @@ out:
- spin_unlock(&glob->lock);
- }
-
-+
-+
- static void ttm_shrink_work(struct work_struct *work)
- {
- struct ttm_mem_global *glob =
-@@ -89,63 +239,198 @@ static void ttm_shrink_work(struct work_struct *work)
- ttm_shrink(glob, true, 0ULL);
- }
-
-+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
-+ const struct sysinfo *si)
-+{
-+ struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
-+ uint64_t mem;
-+ int ret;
-+
-+ if (unlikely(!zone))
-+ return -ENOMEM;
-+
-+ mem = si->totalram - si->totalhigh;
-+ mem *= si->mem_unit;
-+
-+ zone->name = "kernel";
-+ zone->zone_mem = mem;
-+ zone->max_mem = mem >> 1;
-+ zone->emer_mem = (mem >> 1) + (mem >> 2);
-+ zone->swap_limit = zone->max_mem - (mem >> 3);
-+ zone->used_mem = 0;
-+ zone->glob = glob;
-+ glob->zone_kernel = zone;
-+ kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-+ ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
-+ if (unlikely(ret != 0)) {
-+ kobject_put(&zone->kobj);
-+ return ret;
-+ }
-+ glob->zones[glob->num_zones++] = zone;
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HIGHMEM
-+static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
-+ const struct sysinfo *si)
-+{
-+ struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
-+ uint64_t mem;
-+ int ret;
-+
-+ if (unlikely(!zone))
-+ return -ENOMEM;
-+
-+ if (si->totalhigh == 0)
-+ return 0;
-+
-+ mem = si->totalram;
-+ mem *= si->mem_unit;
-+
-+ zone->name = "highmem";
-+ zone->zone_mem = mem;
-+ zone->max_mem = mem >> 1;
-+ zone->emer_mem = (mem >> 1) + (mem >> 2);
-+ zone->swap_limit = zone->max_mem - (mem >> 3);
-+ zone->used_mem = 0;
-+ zone->glob = glob;
-+ glob->zone_highmem = zone;
-+ kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-+ ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
-+ if (unlikely(ret != 0)) {
-+ kobject_put(&zone->kobj);
-+ return ret;
-+ }
-+ glob->zones[glob->num_zones++] = zone;
-+ return 0;
-+}
-+#else
-+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
-+ const struct sysinfo *si)
-+{
-+ struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
-+ uint64_t mem;
-+ int ret;
-+
-+ if (unlikely(!zone))
-+ return -ENOMEM;
-+
-+ mem = si->totalram;
-+ mem *= si->mem_unit;
-+
-+ /**
-+ * No special dma32 zone needed.
-+ */
-+
-+ if (mem <= ((uint64_t) 1ULL << 32))
-+ return 0;
-+
-+ /*
-+ * Limit max dma32 memory to 4GB for now
-+ * until we can figure out how big this
-+ * zone really is.
-+ */
-+
-+ mem = ((uint64_t) 1ULL << 32);
-+ zone->name = "dma32";
-+ zone->zone_mem = mem;
-+ zone->max_mem = mem >> 1;
-+ zone->emer_mem = (mem >> 1) + (mem >> 2);
-+ zone->swap_limit = zone->max_mem - (mem >> 3);
-+ zone->used_mem = 0;
-+ zone->glob = glob;
-+ glob->zone_dma32 = zone;
-+ kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-+ ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
-+ if (unlikely(ret != 0)) {
-+ kobject_put(&zone->kobj);
-+ return ret;
-+ }
-+ glob->zones[glob->num_zones++] = zone;
-+ return 0;
-+}
-+#endif
-+
- int ttm_mem_global_init(struct ttm_mem_global *glob)
- {
- struct sysinfo si;
-- uint64_t mem;
-+ int ret;
-+ int i;
-+ struct ttm_mem_zone *zone;
-
- spin_lock_init(&glob->lock);
- glob->swap_queue = create_singlethread_workqueue("ttm_swap");
- INIT_WORK(&glob->work, ttm_shrink_work);
- init_waitqueue_head(&glob->queue);
-+ kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
-+ ret = kobject_add(&glob->kobj,
-+ ttm_get_kobj(),
-+ "memory_accounting");
-+ if (unlikely(ret != 0)) {
-+ kobject_put(&glob->kobj);
-+ return ret;
-+ }
-
- si_meminfo(&si);
-
-- mem = si.totalram - si.totalhigh;
-- mem *= si.mem_unit;
--
-- glob->max_memory = mem >> 1;
-- glob->emer_memory = (mem >> 1) + (mem >> 2);
-- glob->swap_limit = glob->max_memory - (mem >> 3);
-- glob->used_memory = 0;
-- glob->used_total_memory = 0;
-- glob->shrink = NULL;
--
-- mem = si.totalram;
-- mem *= si.mem_unit;
--
-- glob->max_total_memory = mem >> 1;
-- glob->emer_total_memory = (mem >> 1) + (mem >> 2);
--
-- glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
--
-- printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
-- glob->max_total_memory >> 20);
-- printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
-- glob->max_memory >> 20);
--
-+ ret = ttm_mem_init_kernel_zone(glob, &si);
-+ if (unlikely(ret != 0))
-+ goto out_no_zone;
-+#ifdef CONFIG_HIGHMEM
-+ ret = ttm_mem_init_highmem_zone(glob, &si);
-+ if (unlikely(ret != 0))
-+ goto out_no_zone;
-+#else
-+ ret = ttm_mem_init_dma32_zone(glob, &si);
-+ if (unlikely(ret != 0))
-+ goto out_no_zone;
-+#endif
-+ for (i = 0; i < glob->num_zones; ++i) {
-+ zone = glob->zones[i];
-+ printk(KERN_INFO TTM_PFX
-+ "Zone %7s: Available graphics memory: %llu kiB.\n",
-+ zone->name, (unsigned long long) zone->max_mem >> 10);
-+ }
- return 0;
-+out_no_zone:
-+ ttm_mem_global_release(glob);
-+ return ret;
- }
- EXPORT_SYMBOL(ttm_mem_global_init);
-
- void ttm_mem_global_release(struct ttm_mem_global *glob)
- {
-- printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
-- (unsigned long long)glob->used_total_memory);
-+ unsigned int i;
-+ struct ttm_mem_zone *zone;
-+
- flush_workqueue(glob->swap_queue);
- destroy_workqueue(glob->swap_queue);
- glob->swap_queue = NULL;
-+ for (i = 0; i < glob->num_zones; ++i) {
-+ zone = glob->zones[i];
-+ kobject_del(&zone->kobj);
-+ kobject_put(&zone->kobj);
-+ }
-+ kobject_del(&glob->kobj);
-+ kobject_put(&glob->kobj);
- }
- EXPORT_SYMBOL(ttm_mem_global_release);
-
--static inline void ttm_check_swapping(struct ttm_mem_global *glob)
-+static void ttm_check_swapping(struct ttm_mem_global *glob)
- {
-- bool needs_swapping;
-+ bool needs_swapping = false;
-+ unsigned int i;
-+ struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
-- needs_swapping = (glob->used_memory > glob->swap_limit ||
-- glob->used_total_memory >
-- glob->total_memory_swap_limit);
-+ for (i = 0; i < glob->num_zones; ++i) {
-+ zone = glob->zones[i];
-+ if (zone->used_mem > zone->swap_limit) {
-+ needs_swapping = true;
-+ break;
-+ }
-+ }
-+
- spin_unlock(&glob->lock);
-
- if (unlikely(needs_swapping))
-@@ -153,44 +438,60 @@ static inline void ttm_check_swapping(struct ttm_mem_global *glob)
-
- }
-
--void ttm_mem_global_free(struct ttm_mem_global *glob,
-- uint64_t amount, bool himem)
-+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
-+ struct ttm_mem_zone *single_zone,
-+ uint64_t amount)
- {
-+ unsigned int i;
-+ struct ttm_mem_zone *zone;
-+
- spin_lock(&glob->lock);
-- glob->used_total_memory -= amount;
-- if (!himem)
-- glob->used_memory -= amount;
-- wake_up_all(&glob->queue);
-+ for (i = 0; i < glob->num_zones; ++i) {
-+ zone = glob->zones[i];
-+ if (single_zone && zone != single_zone)
-+ continue;
-+ zone->used_mem -= amount;
-+ }
- spin_unlock(&glob->lock);
- }
-
-+void ttm_mem_global_free(struct ttm_mem_global *glob,
-+ uint64_t amount)
-+{
-+ return ttm_mem_global_free_zone(glob, NULL, amount);
-+}
-+
- static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
-- uint64_t amount, bool himem, bool reserve)
-+ struct ttm_mem_zone *single_zone,
-+ uint64_t amount, bool reserve)
- {
- uint64_t limit;
-- uint64_t lomem_limit;
- int ret = -ENOMEM;
-+ unsigned int i;
-+ struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
-+ for (i = 0; i < glob->num_zones; ++i) {
-+ zone = glob->zones[i];
-+ if (single_zone && zone != single_zone)
-+ continue;
-
-- if (capable(CAP_SYS_ADMIN)) {
-- limit = glob->emer_total_memory;
-- lomem_limit = glob->emer_memory;
-- } else {
-- limit = glob->max_total_memory;
-- lomem_limit = glob->max_memory;
-- }
-+ limit = (capable(CAP_SYS_ADMIN)) ?
-+ zone->emer_mem : zone->max_mem;
-
-- if (unlikely(glob->used_total_memory + amount > limit))
-- goto out_unlock;
-- if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
-- goto out_unlock;
-+ if (zone->used_mem > limit)
-+ goto out_unlock;
-+ }
-
- if (reserve) {
-- glob->used_total_memory += amount;
-- if (!himem)
-- glob->used_memory += amount;
-+ for (i = 0; i < glob->num_zones; ++i) {
-+ zone = glob->zones[i];
-+ if (single_zone && zone != single_zone)
-+ continue;
-+ zone->used_mem += amount;
-+ }
- }
-+
- ret = 0;
- out_unlock:
- spin_unlock(&glob->lock);
-@@ -199,12 +500,17 @@ out_unlock:
- return ret;
- }
-
--int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-- bool no_wait, bool interruptible, bool himem)
-+
-+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
-+ struct ttm_mem_zone *single_zone,
-+ uint64_t memory,
-+ bool no_wait, bool interruptible)
- {
- int count = TTM_MEMORY_ALLOC_RETRIES;
-
-- while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
-+ while (unlikely(ttm_mem_global_reserve(glob,
-+ single_zone,
-+ memory, true)
- != 0)) {
- if (no_wait)
- return -ENOMEM;
-@@ -216,6 +522,56 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- return 0;
- }
-
-+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-+ bool no_wait, bool interruptible)
-+{
-+ /**
-+ * Normal allocations of kernel memory are registered in
-+ * all zones.
-+ */
-+
-+ return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
-+ interruptible);
-+}
-+
-+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-+ struct page *page,
-+ bool no_wait, bool interruptible)
-+{
-+
-+ struct ttm_mem_zone *zone = NULL;
-+
-+ /**
-+ * Page allocations may be registed in a single zone
-+ * only if highmem or !dma32.
-+ */
-+
-+#ifdef CONFIG_HIGHMEM
-+ if (PageHighMem(page) && glob->zone_highmem != NULL)
-+ zone = glob->zone_highmem;
-+#else
-+ if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-+ zone = glob->zone_kernel;
-+#endif
-+ return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
-+ interruptible);
-+}
-+
-+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
-+{
-+ struct ttm_mem_zone *zone = NULL;
-+
-+#ifdef CONFIG_HIGHMEM
-+ if (PageHighMem(page) && glob->zone_highmem != NULL)
-+ zone = glob->zone_highmem;
-+#else
-+ if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-+ zone = glob->zone_kernel;
-+#endif
-+ ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
-+}
-+
-+
- size_t ttm_round_pot(size_t size)
- {
- if ((size & (size - 1)) == 0)
-diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
-index 59ce819..9a6edbf 100644
---- a/drivers/gpu/drm/ttm/ttm_module.c
-+++ b/drivers/gpu/drm/ttm/ttm_module.c
-@@ -29,16 +29,72 @@
- * Jerome Glisse
- */
- #include <linux/module.h>
--#include <ttm/ttm_module.h>
-+#include <linux/device.h>
-+#include <linux/sched.h>
-+#include "ttm/ttm_module.h"
-+#include "drm_sysfs.h"
-+
-+static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-+atomic_t device_released;
-+
-+static struct device_type ttm_drm_class_type = {
-+ .name = "ttm",
-+ /**
-+ * Add pm ops here.
-+ */
-+};
-+
-+static void ttm_drm_class_device_release(struct device *dev)
-+{
-+ atomic_set(&device_released, 1);
-+ wake_up_all(&exit_q);
-+}
-+
-+static struct device ttm_drm_class_device = {
-+ .type = &ttm_drm_class_type,
-+ .release = &ttm_drm_class_device_release
-+};
-+
-+struct kobject *ttm_get_kobj(void)
-+{
-+ struct kobject *kobj = &ttm_drm_class_device.kobj;
-+ BUG_ON(kobj == NULL);
-+ return kobj;
-+}
-
- static int __init ttm_init(void)
- {
-+ int ret;
-+
-+ ret = dev_set_name(&ttm_drm_class_device, "ttm");
-+ if (unlikely(ret != 0))
-+ return ret;
-+
- ttm_global_init();
-+
-+ atomic_set(&device_released, 0);
-+ ret = drm_class_device_register(&ttm_drm_class_device);
-+ if (unlikely(ret != 0))
-+ goto out_no_dev_reg;
-+
- return 0;
-+out_no_dev_reg:
-+ atomic_set(&device_released, 1);
-+ wake_up_all(&exit_q);
-+ ttm_global_release();
-+ return ret;
- }
-
- static void __exit ttm_exit(void)
- {
-+ drm_class_device_unregister(&ttm_drm_class_device);
-+
-+ /**
-+ * Refuse to unload until the TTM device is released.
-+ * Not sure this is 100% needed.
-+ */
-+
-+ wait_event(exit_q, atomic_read(&device_released) == 1);
- ttm_global_release();
- }
-
-diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
-index b8b6c4a..a55ee1a 100644
---- a/drivers/gpu/drm/ttm/ttm_tt.c
-+++ b/drivers/gpu/drm/ttm/ttm_tt.c
-@@ -34,76 +34,13 @@
- #include <linux/pagemap.h>
- #include <linux/file.h>
- #include <linux/swap.h>
-+#include "drm_cache.h"
- #include "ttm/ttm_module.h"
- #include "ttm/ttm_bo_driver.h"
- #include "ttm/ttm_placement.h"
-
- static int ttm_tt_swapin(struct ttm_tt *ttm);
-
--#if defined(CONFIG_X86)
--static void ttm_tt_clflush_page(struct page *page)
--{
-- uint8_t *page_virtual;
-- unsigned int i;
--
-- if (unlikely(page == NULL))
-- return;
--
-- page_virtual = kmap_atomic(page, KM_USER0);
--
-- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-- clflush(page_virtual + i);
--
-- kunmap_atomic(page_virtual, KM_USER0);
--}
--
--static void ttm_tt_cache_flush_clflush(struct page *pages[],
-- unsigned long num_pages)
--{
-- unsigned long i;
--
-- mb();
-- for (i = 0; i < num_pages; ++i)
-- ttm_tt_clflush_page(*pages++);
-- mb();
--}
--#elif !defined(__powerpc__)
--static void ttm_tt_ipi_handler(void *null)
--{
-- ;
--}
--#endif
--
--void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
--{
--
--#if defined(CONFIG_X86)
-- if (cpu_has_clflush) {
-- ttm_tt_cache_flush_clflush(pages, num_pages);
-- return;
-- }
--#elif defined(__powerpc__)
-- unsigned long i;
--
-- for (i = 0; i < num_pages; ++i) {
-- struct page *page = pages[i];
-- void *page_virtual;
--
-- if (unlikely(page == NULL))
-- continue;
--
-- page_virtual = kmap_atomic(page, KM_USER0);
-- flush_dcache_range((unsigned long) page_virtual,
-- (unsigned long) page_virtual + PAGE_SIZE);
-- kunmap_atomic(page_virtual, KM_USER0);
-- }
--#else
-- if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
-- printk(KERN_ERR TTM_PFX
-- "Timed out waiting for drm cache flush.\n");
--#endif
--}
--
- /**
- * Allocates storage for pointers to the pages that back the ttm.
- *
-@@ -179,7 +116,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
- set_page_dirty_lock(page);
-
- ttm->pages[i] = NULL;
-- ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
-+ ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
- put_page(page);
- }
- ttm->state = tt_unpopulated;
-@@ -190,8 +127,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
- static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
- {
- struct page *p;
-- struct ttm_bo_device *bdev = ttm->bdev;
-- struct ttm_mem_global *mem_glob = bdev->mem_glob;
-+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- int ret;
-
- while (NULL == (p = ttm->pages[index])) {
-@@ -200,21 +136,14 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
- if (!p)
- return NULL;
-
-- if (PageHighMem(p)) {
-- ret =
-- ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-- false, false, true);
-- if (unlikely(ret != 0))
-- goto out_err;
-+ ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
-+ if (unlikely(ret != 0))
-+ goto out_err;
-+
-+ if (PageHighMem(p))
- ttm->pages[--ttm->first_himem_page] = p;
-- } else {
-- ret =
-- ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-- false, false, false);
-- if (unlikely(ret != 0))
-- goto out_err;
-+ else
- ttm->pages[++ttm->last_lomem_page] = p;
-- }
- }
- return p;
- out_err:
-@@ -310,7 +239,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
- }
-
- if (ttm->caching_state == tt_cached)
-- ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
-+ drm_clflush_pages(ttm->pages, ttm->num_pages);
-
- for (i = 0; i < ttm->num_pages; ++i) {
- cur_page = ttm->pages[i];
-@@ -368,8 +297,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
- printk(KERN_ERR TTM_PFX
- "Erroneous page count. "
- "Leaking pages.\n");
-- ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
-- PageHighMem(cur_page));
-+ ttm_mem_global_free_page(ttm->glob->mem_glob,
-+ cur_page);
- __free_page(cur_page);
- }
- }
-@@ -414,7 +343,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
- struct mm_struct *mm = tsk->mm;
- int ret;
- int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
-- struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
-+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-
- BUG_ON(num_pages != ttm->num_pages);
- BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
-@@ -424,7 +353,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
- */
-
- ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
-- false, false, false);
-+ false, false);
- if (unlikely(ret != 0))
- return ret;
-
-@@ -435,7 +364,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
-
- if (ret != num_pages && write) {
- ttm_tt_free_user_pages(ttm);
-- ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
-+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
- return -ENOMEM;
- }
-
-@@ -459,8 +388,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
- if (!ttm)
- return NULL;
-
-- ttm->bdev = bdev;
--
-+ ttm->glob = bdev->glob;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index 45b67d9..eeefb63 100644
---- a/include/drm/drmP.h
-+++ b/include/drm/drmP.h
-@@ -88,7 +88,37 @@ struct drm_device;
- #define DRM_UT_CORE 0x01
- #define DRM_UT_DRIVER 0x02
- #define DRM_UT_KMS 0x04
--#define DRM_UT_MODE 0x08
-+/*
-+ * Three debug levels are defined.
-+ * drm_core, drm_driver, drm_kms
-+ * drm_core level can be used in the generic drm code. For example:
-+ * drm_ioctl, drm_mm, drm_memory
-+ * The macro definiton of DRM_DEBUG is used.
-+ * DRM_DEBUG(fmt, args...)
-+ * The debug info by using the DRM_DEBUG can be obtained by adding
-+ * the boot option of "drm.debug=1".
-+ *
-+ * drm_driver level can be used in the specific drm driver. It is used
-+ * to add the debug info related with the drm driver. For example:
-+ * i915_drv, i915_dma, i915_gem, radeon_drv,
-+ * The macro definition of DRM_DEBUG_DRIVER can be used.
-+ * DRM_DEBUG_DRIVER(fmt, args...)
-+ * The debug info by using the DRM_DEBUG_DRIVER can be obtained by
-+ * adding the boot option of "drm.debug=0x02"
-+ *
-+ * drm_kms level can be used in the KMS code related with specific drm driver.
-+ * It is used to add the debug info related with KMS mode. For example:
-+ * the connector/crtc ,
-+ * The macro definition of DRM_DEBUG_KMS can be used.
-+ * DRM_DEBUG_KMS(fmt, args...)
-+ * The debug info by using the DRM_DEBUG_KMS can be obtained by
-+ * adding the boot option of "drm.debug=0x04"
-+ *
-+ * If we add the boot option of "drm.debug=0x06", we can get the debug info by
-+ * using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER.
-+ * If we add the boot option of "drm.debug=0x05", we can get the debug info by
-+ * using the DRM_DEBUG_KMS and DRM_DEBUG.
-+ */
-
- extern void drm_ut_debug_printk(unsigned int request_level,
- const char *prefix,
-@@ -174,19 +204,14 @@ extern void drm_ut_debug_printk(unsigned int request_level,
- __func__, fmt, ##args); \
- } while (0)
-
--#define DRM_DEBUG_DRIVER(prefix, fmt, args...) \
-+#define DRM_DEBUG_DRIVER(fmt, args...) \
- do { \
-- drm_ut_debug_printk(DRM_UT_DRIVER, prefix, \
-+ drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \
- __func__, fmt, ##args); \
- } while (0)
--#define DRM_DEBUG_KMS(prefix, fmt, args...) \
-- do { \
-- drm_ut_debug_printk(DRM_UT_KMS, prefix, \
-- __func__, fmt, ##args); \
-- } while (0)
--#define DRM_DEBUG_MODE(prefix, fmt, args...) \
-+#define DRM_DEBUG_KMS(fmt, args...) \
- do { \
-- drm_ut_debug_printk(DRM_UT_MODE, prefix, \
-+ drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \
- __func__, fmt, ##args); \
- } while (0)
- #define DRM_LOG(fmt, args...) \
-@@ -210,9 +235,8 @@ extern void drm_ut_debug_printk(unsigned int request_level,
- NULL, fmt, ##args); \
- } while (0)
- #else
--#define DRM_DEBUG_DRIVER(prefix, fmt, args...) do { } while (0)
--#define DRM_DEBUG_KMS(prefix, fmt, args...) do { } while (0)
--#define DRM_DEBUG_MODE(prefix, fmt, args...) do { } while (0)
-+#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
-+#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
- #define DRM_DEBUG(fmt, arg...) do { } while (0)
- #define DRM_LOG(fmt, arg...) do { } while (0)
- #define DRM_LOG_KMS(fmt, args...) do { } while (0)
-@@ -1417,7 +1441,7 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
-
- int drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
-- int *handlep);
-+ u32 *handlep);
-
- static inline void
- drm_gem_object_handle_reference(struct drm_gem_object *obj)
-@@ -1443,7 +1467,7 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-
- struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
- struct drm_file *filp,
-- int handle);
-+ u32 handle);
- int drm_gem_close_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
-new file mode 100644
-index 0000000..7bfb063
---- /dev/null
-+++ b/include/drm/drm_cache.h
-@@ -0,0 +1,38 @@
-+/**************************************************************************
-+ *
-+ * Copyright 2009 Red Hat Inc.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors:
-+ * Dave Airlie <airlied@redhat.com>
-+ */
-+
-+#ifndef _DRM_CACHE_H_
-+#define _DRM_CACHE_H_
-+
-+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
-+
-+#endif
-diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
-index 7300fb8..ae1e9e1 100644
---- a/include/drm/drm_crtc.h
-+++ b/include/drm/drm_crtc.h
-@@ -259,6 +259,8 @@ struct drm_framebuffer {
- void *fbdev;
- u32 pseudo_palette[17];
- struct list_head filp_head;
-+ /* if you are using the helper */
-+ void *helper_private;
- };
-
- struct drm_property_blob {
-@@ -572,6 +574,12 @@ struct drm_mode_config {
- struct drm_property *tv_right_margin_property;
- struct drm_property *tv_top_margin_property;
- struct drm_property *tv_bottom_margin_property;
-+ struct drm_property *tv_brightness_property;
-+ struct drm_property *tv_contrast_property;
-+ struct drm_property *tv_flicker_reduction_property;
-+ struct drm_property *tv_overscan_property;
-+ struct drm_property *tv_saturation_property;
-+ struct drm_property *tv_hue_property;
-
- /* Optional properties */
- struct drm_property *scaling_mode_property;
-@@ -736,4 +744,12 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
- extern bool drm_detect_hdmi_monitor(struct edid *edid);
-+extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
-+ int hdisplay, int vdisplay, int vrefresh,
-+ bool reduced, bool interlaced);
-+extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
-+ int hdisplay, int vdisplay, int vrefresh,
-+ bool interlaced, int margins);
-+extern int drm_add_modes_noedid(struct drm_connector *connector,
-+ int hdisplay, int vdisplay);
- #endif /* __DRM_CRTC_H__ */
-diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
-index 6769ff6..4c8daca 100644
---- a/include/drm/drm_crtc_helper.h
-+++ b/include/drm/drm_crtc_helper.h
-@@ -79,6 +79,8 @@ struct drm_encoder_helper_funcs {
- /* detect for DAC style encoders */
- enum drm_connector_status (*detect)(struct drm_encoder *encoder,
- struct drm_connector *connector);
-+ /* disable encoder when not in use - more explicit than dpms off */
-+ void (*disable)(struct drm_encoder *encoder);
- };
-
- struct drm_connector_helper_funcs {
-@@ -98,6 +100,7 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *old_fb);
- extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
-+extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
-
- extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
-
-diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
-new file mode 100644
-index 0000000..2f65633
---- /dev/null
-+++ b/include/drm/drm_encoder_slave.h
-@@ -0,0 +1,162 @@
-+/*
-+ * Copyright (C) 2009 Francisco Jerez.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining
-+ * a copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sublicense, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial
-+ * portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+#ifndef __DRM_ENCODER_SLAVE_H__
-+#define __DRM_ENCODER_SLAVE_H__
-+
-+#include "drmP.h"
-+#include "drm_crtc.h"
-+
-+/**
-+ * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
-+ * @set_config: Initialize any encoder-specific modesetting parameters.
-+ * The meaning of the @params parameter is implementation
-+ * dependent. It will usually be a structure with DVO port
-+ * data format settings or timings. It's not required for
-+ * the new parameters to take effect until the next mode
-+ * is set.
-+ *
-+ * Most of its members are analogous to the function pointers in
-+ * &drm_encoder_helper_funcs and they can optionally be used to
-+ * initialize the latter. Connector-like methods (e.g. @get_modes and
-+ * @set_property) will typically be wrapped around and only be called
-+ * if the encoder is the currently selected one for the connector.
-+ */
-+struct drm_encoder_slave_funcs {
-+ void (*set_config)(struct drm_encoder *encoder,
-+ void *params);
-+
-+ void (*destroy)(struct drm_encoder *encoder);
-+ void (*dpms)(struct drm_encoder *encoder, int mode);
-+ void (*save)(struct drm_encoder *encoder);
-+ void (*restore)(struct drm_encoder *encoder);
-+ bool (*mode_fixup)(struct drm_encoder *encoder,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode);
-+ int (*mode_valid)(struct drm_encoder *encoder,
-+ struct drm_display_mode *mode);
-+ void (*mode_set)(struct drm_encoder *encoder,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode);
-+
-+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
-+ struct drm_connector *connector);
-+ int (*get_modes)(struct drm_encoder *encoder,
-+ struct drm_connector *connector);
-+ int (*create_resources)(struct drm_encoder *encoder,
-+ struct drm_connector *connector);
-+ int (*set_property)(struct drm_encoder *encoder,
-+ struct drm_connector *connector,
-+ struct drm_property *property,
-+ uint64_t val);
-+
-+};
-+
-+/**
-+ * struct drm_encoder_slave - Slave encoder struct
-+ * @base: DRM encoder object.
-+ * @slave_funcs: Slave encoder callbacks.
-+ * @slave_priv: Slave encoder private data.
-+ * @bus_priv: Bus specific data.
-+ *
-+ * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the
-+ * ones in @base. The former are never actually called by the common
-+ * CRTC code, it's just a convenience for splitting the encoder
-+ * functions in an upper, GPU-specific layer and a (hopefully)
-+ * GPU-agnostic lower layer: It's the GPU driver responsibility to
-+ * call the slave methods when appropriate.
-+ *
-+ * drm_i2c_encoder_init() provides a way to get an implementation of
-+ * this.
-+ */
-+struct drm_encoder_slave {
-+ struct drm_encoder base;
-+
-+ struct drm_encoder_slave_funcs *slave_funcs;
-+ void *slave_priv;
-+ void *bus_priv;
-+};
-+#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base)
-+
-+int drm_i2c_encoder_init(struct drm_device *dev,
-+ struct drm_encoder_slave *encoder,
-+ struct i2c_adapter *adap,
-+ const struct i2c_board_info *info);
-+
-+
-+/**
-+ * struct drm_i2c_encoder_driver
-+ *
-+ * Describes a device driver for an encoder connected to the GPU
-+ * through an I2C bus. In addition to the entry points in @i2c_driver
-+ * an @encoder_init function should be provided. It will be called to
-+ * give the driver an opportunity to allocate any per-encoder data
-+ * structures and to initialize the @slave_funcs and (optionally)
-+ * @slave_priv members of @encoder.
-+ */
-+struct drm_i2c_encoder_driver {
-+ struct i2c_driver i2c_driver;
-+
-+ int (*encoder_init)(struct i2c_client *client,
-+ struct drm_device *dev,
-+ struct drm_encoder_slave *encoder);
-+
-+};
-+#define to_drm_i2c_encoder_driver(x) container_of((x), \
-+ struct drm_i2c_encoder_driver, \
-+ i2c_driver)
-+
-+/**
-+ * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
-+ */
-+static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder)
-+{
-+ return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv;
-+}
-+
-+/**
-+ * drm_i2c_encoder_register - Register an I2C encoder driver
-+ * @owner: Module containing the driver.
-+ * @driver: Driver to be registered.
-+ */
-+static inline int drm_i2c_encoder_register(struct module *owner,
-+ struct drm_i2c_encoder_driver *driver)
-+{
-+ return i2c_register_driver(owner, &driver->i2c_driver);
-+}
-+
-+/**
-+ * drm_i2c_encoder_unregister - Unregister an I2C encoder driver
-+ * @driver: Driver to be unregistered.
-+ */
-+static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver)
-+{
-+ i2c_del_driver(&driver->i2c_driver);
-+}
-+
-+void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
-+
-+#endif
-diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
-new file mode 100644
-index 0000000..88fffbd
---- /dev/null
-+++ b/include/drm/drm_fb_helper.h
-@@ -0,0 +1,82 @@
-+/*
-+ * Copyright (c) 2006-2009 Red Hat Inc.
-+ * Copyright (c) 2006-2008 Intel Corporation
-+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
-+ *
-+ * DRM framebuffer helper functions
-+ *
-+ * Permission to use, copy, modify, distribute, and sell this software and its
-+ * documentation for any purpose is hereby granted without fee, provided that
-+ * the above copyright notice appear in all copies and that both that copyright
-+ * notice and this permission notice appear in supporting documentation, and
-+ * that the name of the copyright holders not be used in advertising or
-+ * publicity pertaining to distribution of the software without specific,
-+ * written prior permission. The copyright holders make no representations
-+ * about the suitability of this software for any purpose. It is provided "as
-+ * is" without express or implied warranty.
-+ *
-+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
-+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-+ * OF THIS SOFTWARE.
-+ *
-+ * Authors:
-+ * Dave Airlie <airlied@linux.ie>
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+#ifndef DRM_FB_HELPER_H
-+#define DRM_FB_HELPER_H
-+
-+struct drm_fb_helper_crtc {
-+ uint32_t crtc_id;
-+ struct drm_mode_set mode_set;
-+};
-+
-+struct drm_fb_helper_funcs {
-+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
-+ u16 blue, int regno);
-+};
-+
-+struct drm_fb_helper {
-+ struct drm_framebuffer *fb;
-+ struct drm_device *dev;
-+ struct drm_display_mode *mode;
-+ int crtc_count;
-+ struct drm_fb_helper_crtc *crtc_info;
-+ struct drm_fb_helper_funcs *funcs;
-+ int conn_limit;
-+ struct list_head kernel_fb_list;
-+};
-+
-+int drm_fb_helper_single_fb_probe(struct drm_device *dev,
-+ int (*fb_create)(struct drm_device *dev,
-+ uint32_t fb_width,
-+ uint32_t fb_height,
-+ uint32_t surface_width,
-+ uint32_t surface_height,
-+ struct drm_framebuffer **fb_ptr));
-+int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
-+ int max_conn);
-+void drm_fb_helper_free(struct drm_fb_helper *helper);
-+int drm_fb_helper_blank(int blank, struct fb_info *info);
-+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
-+ struct fb_info *info);
-+int drm_fb_helper_set_par(struct fb_info *info);
-+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
-+ struct fb_info *info);
-+int drm_fb_helper_setcolreg(unsigned regno,
-+ unsigned red,
-+ unsigned green,
-+ unsigned blue,
-+ unsigned transp,
-+ struct fb_info *info);
-+
-+void drm_fb_helper_restore(void);
-+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
-+ uint32_t fb_width, uint32_t fb_height);
-+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch);
-+
-+#endif
-diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
-index f833207..62329f9 100644
---- a/include/drm/drm_mm.h
-+++ b/include/drm/drm_mm.h
-@@ -37,6 +37,9 @@
- * Generic range manager structs
- */
- #include <linux/list.h>
-+#ifdef CONFIG_DEBUG_FS
-+#include <linux/seq_file.h>
-+#endif
-
- struct drm_mm_node {
- struct list_head fl_entry;
-@@ -96,4 +99,8 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
- return block->mm;
- }
-
-+#ifdef CONFIG_DEBUG_FS
-+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
-+#endif
-+
- #endif
-diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
-index ae304cc..1f90841 100644
---- a/include/drm/drm_mode.h
-+++ b/include/drm/drm_mode.h
-@@ -68,10 +68,11 @@
- #define DRM_MODE_DPMS_OFF 3
-
- /* Scaling mode options */
--#define DRM_MODE_SCALE_NON_GPU 0
--#define DRM_MODE_SCALE_FULLSCREEN 1
--#define DRM_MODE_SCALE_NO_SCALE 2
--#define DRM_MODE_SCALE_ASPECT 3
-+#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
-+ software can still scale) */
-+#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
-+#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
-+#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
-
- /* Dithering mode options */
- #define DRM_MODE_DITHERING_OFF 0
-@@ -141,6 +142,7 @@ struct drm_mode_get_encoder {
- #define DRM_MODE_SUBCONNECTOR_Composite 5
- #define DRM_MODE_SUBCONNECTOR_SVIDEO 6
- #define DRM_MODE_SUBCONNECTOR_Component 8
-+#define DRM_MODE_SUBCONNECTOR_SCART 9
-
- #define DRM_MODE_CONNECTOR_Unknown 0
- #define DRM_MODE_CONNECTOR_VGA 1
-@@ -155,6 +157,7 @@ struct drm_mode_get_encoder {
- #define DRM_MODE_CONNECTOR_DisplayPort 10
- #define DRM_MODE_CONNECTOR_HDMIA 11
- #define DRM_MODE_CONNECTOR_HDMIB 12
-+#define DRM_MODE_CONNECTOR_TV 13
-
- struct drm_mode_get_connector {
-
-diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
-new file mode 100644
-index 0000000..1d8e033
---- /dev/null
-+++ b/include/drm/drm_sysfs.h
-@@ -0,0 +1,12 @@
-+#ifndef _DRM_SYSFS_H_
-+#define _DRM_SYSFS_H_
-+
-+/**
-+ * This minimalistic include file is intended for users (read TTM) that
-+ * don't want to include the full drmP.h file.
-+ */
-+
-+extern int drm_class_device_register(struct device *dev);
-+extern void drm_class_device_unregister(struct device *dev);
-+
-+#endif
-diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
-index cd22ab4..4911461 100644
---- a/include/drm/ttm/ttm_bo_api.h
-+++ b/include/drm/ttm/ttm_bo_api.h
-@@ -155,6 +155,7 @@ struct ttm_buffer_object {
- * Members constant at init.
- */
-
-+ struct ttm_bo_global *glob;
- struct ttm_bo_device *bdev;
- unsigned long buffer_start;
- enum ttm_bo_type type;
-@@ -245,14 +246,15 @@ struct ttm_buffer_object {
- * premapped region.
- */
-
-+#define TTM_BO_MAP_IOMEM_MASK 0x80
- struct ttm_bo_kmap_obj {
- void *virtual;
- struct page *page;
- enum {
-- ttm_bo_map_iomap,
-- ttm_bo_map_vmap,
-- ttm_bo_map_kmap,
-- ttm_bo_map_premapped,
-+ ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
-+ ttm_bo_map_vmap = 2,
-+ ttm_bo_map_kmap = 3,
-+ ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
- } bo_kmap_type;
- };
-
-@@ -522,8 +524,7 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
- static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
- bool *is_iomem)
- {
-- *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
-- map->bo_kmap_type == ttm_bo_map_premapped);
-+ *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
- return map->virtual;
- }
-
-diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
-index a68829d..e8cd6d2 100644
---- a/include/drm/ttm/ttm_bo_driver.h
-+++ b/include/drm/ttm/ttm_bo_driver.h
-@@ -32,6 +32,7 @@
-
- #include "ttm/ttm_bo_api.h"
- #include "ttm/ttm_memory.h"
-+#include "ttm/ttm_module.h"
- #include "drm_mm.h"
- #include "linux/workqueue.h"
- #include "linux/fs.h"
-@@ -161,7 +162,7 @@ struct ttm_tt {
- long last_lomem_page;
- uint32_t page_flags;
- unsigned long num_pages;
-- struct ttm_bo_device *bdev;
-+ struct ttm_bo_global *glob;
- struct ttm_backend *be;
- struct task_struct *tsk;
- unsigned long start;
-@@ -364,24 +365,73 @@ struct ttm_bo_driver {
- void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
- };
-
--#define TTM_NUM_MEM_TYPES 8
-+/**
-+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
-+ */
-+
-+struct ttm_bo_global_ref {
-+ struct ttm_global_reference ref;
-+ struct ttm_mem_global *mem_glob;
-+};
-
--#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
-- idling before CPU mapping */
--#define TTM_BO_PRIV_FLAG_MAX 1
- /**
-- * struct ttm_bo_device - Buffer object driver device-specific data.
-+ * struct ttm_bo_global - Buffer object driver global data.
- *
- * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
-- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
-- * @count: Current number of buffer object.
-- * @pages: Current number of pinned pages.
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
-- * @shrink: A shrink callback object used for buffre object swap.
-+ * @shrink: A shrink callback object used for buffer object swap.
- * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
- * used by a buffer object. This is excluding page arrays and backing pages.
- * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
-+ * @device_list_mutex: Mutex protecting the device list.
-+ * This mutex is held while traversing the device list for pm options.
-+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
-+ * @device_list: List of buffer object devices.
-+ * @swap_lru: Lru list of buffer objects used for swapping.
-+ */
-+
-+struct ttm_bo_global {
-+
-+ /**
-+ * Constant after init.
-+ */
-+
-+ struct kobject kobj;
-+ struct ttm_mem_global *mem_glob;
-+ struct page *dummy_read_page;
-+ struct ttm_mem_shrink shrink;
-+ size_t ttm_bo_extra_size;
-+ size_t ttm_bo_size;
-+ struct mutex device_list_mutex;
-+ spinlock_t lru_lock;
-+
-+ /**
-+ * Protected by device_list_mutex.
-+ */
-+ struct list_head device_list;
-+
-+ /**
-+ * Protected by the lru_lock.
-+ */
-+ struct list_head swap_lru;
-+
-+ /**
-+ * Internal protection.
-+ */
-+ atomic_t bo_count;
-+};
-+
-+
-+#define TTM_NUM_MEM_TYPES 8
-+
-+#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
-+ idling before CPU mapping */
-+#define TTM_BO_PRIV_FLAG_MAX 1
-+/**
-+ * struct ttm_bo_device - Buffer object driver device-specific data.
-+ *
-+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @man: An array of mem_type_managers.
- * @addr_space_mm: Range manager for the device address space.
- * lru_lock: Spinlock that protects the buffer+device lru lists and
-@@ -399,32 +449,21 @@ struct ttm_bo_device {
- /*
- * Constant after bo device init / atomic.
- */
--
-- struct ttm_mem_global *mem_glob;
-+ struct list_head device_list;
-+ struct ttm_bo_global *glob;
- struct ttm_bo_driver *driver;
-- struct page *dummy_read_page;
-- struct ttm_mem_shrink shrink;
--
-- size_t ttm_bo_extra_size;
-- size_t ttm_bo_size;
--
- rwlock_t vm_lock;
-+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
- /*
- * Protected by the vm lock.
- */
-- struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
- struct rb_root addr_space_rb;
- struct drm_mm addr_space_mm;
-
- /*
-- * Might want to change this to one lock per manager.
-- */
-- spinlock_t lru_lock;
-- /*
-- * Protected by the lru lock.
-+ * Protected by the global:lru lock.
- */
- struct list_head ddestroy;
-- struct list_head swap_lru;
-
- /*
- * Protected by load / firstopen / lastclose /unload sync.
-@@ -640,6 +679,9 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- unsigned long *bus_offset,
- unsigned long *bus_size);
-
-+extern void ttm_bo_global_release(struct ttm_global_reference *ref);
-+extern int ttm_bo_global_init(struct ttm_global_reference *ref);
-+
- extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
-
- /**
-@@ -657,7 +699,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
- * !0: Failure.
- */
- extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
-- struct ttm_mem_global *mem_glob,
-+ struct ttm_bo_global *glob,
- struct ttm_bo_driver *driver,
- uint64_t file_page_offset, bool need_dma32);
-
-diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
-index d8b8f04..6983a7c 100644
---- a/include/drm/ttm/ttm_memory.h
-+++ b/include/drm/ttm/ttm_memory.h
-@@ -32,6 +32,7 @@
- #include <linux/spinlock.h>
- #include <linux/wait.h>
- #include <linux/errno.h>
-+#include <linux/kobject.h>
-
- /**
- * struct ttm_mem_shrink - callback to shrink TTM memory usage.
-@@ -60,34 +61,33 @@ struct ttm_mem_shrink {
- * @queue: Wait queue for processes suspended waiting for memory.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
-- * @emer_memory: Lowmem memory limit available for root.
-- * @max_memory: Lowmem memory limit available for non-root.
-- * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
-- * @used_memory: Currently used lowmem memory.
-- * @used_total_memory: Currently used total (lowmem + highmem) memory.
-- * @total_memory_swap_limit: Total memory limit where the shrink workqueue
-- * kicks in.
-- * @max_total_memory: Total memory available to non-root processes.
-- * @emer_total_memory: Total memory available to root processes.
-+ * @zones: Array of pointers to accounting zones.
-+ * @num_zones: Number of populated entries in the @zones array.
-+ * @zone_kernel: Pointer to the kernel zone.
-+ * @zone_highmem: Pointer to the highmem zone if there is one.
-+ * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-+#define TTM_MEM_MAX_ZONES 2
-+struct ttm_mem_zone;
- struct ttm_mem_global {
-+ struct kobject kobj;
- struct ttm_mem_shrink *shrink;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- wait_queue_head_t queue;
- spinlock_t lock;
-- uint64_t emer_memory;
-- uint64_t max_memory;
-- uint64_t swap_limit;
-- uint64_t used_memory;
-- uint64_t used_total_memory;
-- uint64_t total_memory_swap_limit;
-- uint64_t max_total_memory;
-- uint64_t emer_total_memory;
-+ struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
-+ unsigned int num_zones;
-+ struct ttm_mem_zone *zone_kernel;
-+#ifdef CONFIG_HIGHMEM
-+ struct ttm_mem_zone *zone_highmem;
-+#else
-+ struct ttm_mem_zone *zone_dma32;
-+#endif
- };
-
- /**
-@@ -146,8 +146,13 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
- extern int ttm_mem_global_init(struct ttm_mem_global *glob);
- extern void ttm_mem_global_release(struct ttm_mem_global *glob);
- extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-- bool no_wait, bool interruptible, bool himem);
-+ bool no_wait, bool interruptible);
- extern void ttm_mem_global_free(struct ttm_mem_global *glob,
-- uint64_t amount, bool himem);
-+ uint64_t amount);
-+extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-+ struct page *page,
-+ bool no_wait, bool interruptible);
-+extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-+ struct page *page);
- extern size_t ttm_round_pot(size_t size);
- #endif
-diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
-index d1d4338..cf416ae 100644
---- a/include/drm/ttm/ttm_module.h
-+++ b/include/drm/ttm/ttm_module.h
-@@ -32,6 +32,7 @@
- #define _TTM_MODULE_H_
-
- #include <linux/kernel.h>
-+struct kobject;
-
- #define TTM_PFX "[TTM] "
-
-@@ -54,5 +55,6 @@ extern void ttm_global_init(void);
- extern void ttm_global_release(void);
- extern int ttm_global_item_ref(struct ttm_global_reference *ref);
- extern void ttm_global_item_unref(struct ttm_global_reference *ref);
-+extern struct kobject *ttm_get_kobj(void);
-
- #endif /* _TTM_MODULE_H_ */
diff --git a/freed-ora/current/F-12/drm-nouveau-d620.patch b/freed-ora/current/F-12/drm-nouveau-d620.patch
new file mode 100644
index 000000000..601e200eb
--- /dev/null
+++ b/freed-ora/current/F-12/drm-nouveau-d620.patch
@@ -0,0 +1,121 @@
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 6b6c303..a81c738 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -3198,7 +3198,6 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
+ struct nvbios *bios = &dev_priv->vbios;
+ unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
+ uint16_t scriptptr = 0, clktable;
+- uint8_t clktableptr = 0;
+
+ /*
+ * For now we assume version 3.0 table - g80 support will need some
+@@ -3217,26 +3216,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
+ scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
+ break;
+ case LVDS_RESET:
++ clktable = bios->fp.lvdsmanufacturerpointer + 15;
++ if (dcbent->or == 4)
++ clktable += 8;
++
+ if (dcbent->lvdsconf.use_straps_for_mode) {
+ if (bios->fp.dual_link)
+- clktableptr += 2;
+- if (bios->fp.BITbit1)
+- clktableptr++;
++ clktable += 4;
++ if (bios->fp.if_is_24bit)
++ clktable += 2;
+ } else {
+ /* using EDID */
+- uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+- int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
++ int cmpval_24bit = (dcbent->or == 4) ? 4 : 1;
+
+ if (bios->fp.dual_link) {
+- clktableptr += 2;
+- fallbackcmpval *= 2;
++ clktable += 4;
++ cmpval_24bit <<= 1;
+ }
+- if (fallbackcmpval & fallback)
+- clktableptr++;
++
++ if (bios->fp.strapless_is_24bit & cmpval_24bit)
++ clktable += 2;
+ }
+
+- /* adding outputset * 8 may not be correct */
+- clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
++ clktable = ROM16(bios->data[clktable]);
+ if (!clktable) {
+ NV_ERROR(dev, "Pixel clock comparison table not found\n");
+ return -ENOENT;
+@@ -3638,37 +3640,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
+ *if_is_24bit = bios->data[lvdsofs] & 16;
+ break;
+ case 0x30:
+- /*
+- * My money would be on there being a 24 bit interface bit in
+- * this table, but I have no example of a laptop bios with a
+- * 24 bit panel to confirm that. Hence we shout loudly if any
+- * bit other than bit 0 is set (I've not even seen bit 1)
+- */
+- if (bios->data[lvdsofs] > 1)
+- NV_ERROR(dev,
+- "You have a very unusual laptop display; please report it\n");
++ case 0x40:
+ /*
+ * No sign of the "power off for reset" or "reset for panel
+ * on" bits, but it's safer to assume we should
+ */
+ bios->fp.power_off_for_reset = true;
+ bios->fp.reset_after_pclk_change = true;
++
+ /*
+ * It's ok lvdsofs is wrong for nv4x edid case; dual_link is
+- * over-written, and BITbit1 isn't used
++ * over-written, and if_is_24bit isn't used
+ */
+ bios->fp.dual_link = bios->data[lvdsofs] & 1;
+- bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
+- bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+- break;
+- case 0x40:
+- bios->fp.dual_link = bios->data[lvdsofs] & 1;
+ bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
+ bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+ bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+ break;
+ }
+
++ /* Dell Latitude D620 reports a too-high value for the dual-link
++ * transition freq, causing us to program the panel incorrectly.
++ *
++ * It doesn't appear the VBIOS actually uses its transition freq
++ * (90000kHz), instead it uses the "Number of LVDS channels" field
++ * out of the panel ID structure (http://www.spwg.org/).
++ *
++ * For the moment, a quirk will do :)
++ */
++ if ((dev->pdev->device == 0x01d7) &&
++ (dev->pdev->subsystem_vendor == 0x1028) &&
++ (dev->pdev->subsystem_device == 0x01c2)) {
++ bios->fp.duallink_transition_clk = 80000;
++ }
++
+ /* set dual_link flag for EDID case */
+ if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
+ bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
+index 4f88e69..fd6274a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
+@@ -267,7 +267,6 @@ struct nvbios {
+ bool reset_after_pclk_change;
+ bool dual_link;
+ bool link_c_increment;
+- bool BITbit1;
+ bool if_is_24bit;
+ int duallink_transition_clk;
+ uint8_t strapless_is_24bit;
diff --git a/freed-ora/current/F-12/drm-nouveau-kconfig.patch b/freed-ora/current/F-12/drm-nouveau-kconfig.patch
new file mode 100644
index 000000000..208400b1a
--- /dev/null
+++ b/freed-ora/current/F-12/drm-nouveau-kconfig.patch
@@ -0,0 +1,11 @@
+--- a/drivers/staging/Kconfig 2010-01-12 13:37:11.000000000 +1000
++++ b/drivers/staging/Kconfig 2010-01-12 13:37:24.000000000 +1000
+@@ -103,6 +103,8 @@
+
+ source "drivers/staging/line6/Kconfig"
+
++source "drivers/gpu/drm/nouveau/Kconfig"
++
+ source "drivers/staging/octeon/Kconfig"
+
+ source "drivers/staging/serqt_usb2/Kconfig"
diff --git a/freed-ora/current/F-12/drm-nouveau-mutex.patch b/freed-ora/current/F-12/drm-nouveau-mutex.patch
new file mode 100644
index 000000000..4bc1fc133
--- /dev/null
+++ b/freed-ora/current/F-12/drm-nouveau-mutex.patch
@@ -0,0 +1,56 @@
+From 967c89306de560a6da9539d24a0d63cb036d58ed Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Tue, 16 Feb 2010 11:14:14 +1000
+Subject: [PATCH] drm/nouveau: use mutex for vbios lock
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+---
+ drivers/gpu/drm/nouveau/nouveau_bios.c | 7 +++----
+ drivers/gpu/drm/nouveau/nouveau_bios.h | 2 +-
+ 2 files changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 2cd0fad..0e9cd1d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct init_exec iexec = { true, false };
+- unsigned long flags;
+
+- spin_lock_irqsave(&bios->lock, flags);
++ mutex_lock(&bios->lock);
+ bios->display.output = dcbent;
+ parse_init_table(bios, table, &iexec);
+ bios->display.output = NULL;
+- spin_unlock_irqrestore(&bios->lock, flags);
++ mutex_unlock(&bios->lock);
+ }
+
+ static bool NVInitVBIOS(struct drm_device *dev)
+@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
+ struct nvbios *bios = &dev_priv->VBIOS;
+
+ memset(bios, 0, sizeof(struct nvbios));
+- spin_lock_init(&bios->lock);
++ mutex_init(&bios->lock);
+ bios->dev = dev;
+
+ if (!NVShadowVBIOS(dev, bios->data))
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
+index 68446fd..fd94bd6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
+@@ -205,7 +205,7 @@ struct nvbios {
+ struct drm_device *dev;
+ struct nouveau_bios_info pub;
+
+- spinlock_t lock;
++ struct mutex lock;
+
+ uint8_t data[NV_PROM_SIZE];
+ unsigned int length;
+--
+1.6.6.1
+
diff --git a/freed-ora/current/F-12/drm-nouveau-nva3-noaccel.patch b/freed-ora/current/F-12/drm-nouveau-nva3-noaccel.patch
new file mode 100644
index 000000000..505c7240e
--- /dev/null
+++ b/freed-ora/current/F-12/drm-nouveau-nva3-noaccel.patch
@@ -0,0 +1,105 @@
+From 030e105efc9a29c7d34fb59fb0e0a40e54178299 Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Wed, 30 Jun 2010 13:34:05 +1000
+Subject: [PATCH] drm/nouveau: disable acceleration on NVA3/NVA5/NVA8 by default
+
+There's an GPU lockup problem for which the cause is currently unknown
+on these chipsets.
+
+Until it's resolved, it's better to leave the user with a working system
+without acceleration than to have random lockups.
+
+With this patch, acceleration will be off by default if a known problem
+chipset is detected, but can be re-enabled with nouveau.noaccel=0 on
+the kernel commandline.
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+
+[ cebbert@redhat.com : Backport to F12 and fix some module parameter descriptions. ]
+---
+
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
+@@ -75,11 +75,11 @@ int nouveau_ignorelid = 0;
+ int nouveau_ignorelid = 0;
+ module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+-MODULE_PARM_DESC(noagp, "Disable all acceleration");
++MODULE_PARM_DESC(noaccel, "Disable all acceleration");
+-int nouveau_noaccel = 0;
++int nouveau_noaccel = -1;
+ module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+-MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
++MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+ int nouveau_nofbaccel = 0;
+ module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -493,6 +493,7 @@ enum nouveau_card_type {
+
+ struct drm_nouveau_private {
+ struct drm_device *dev;
++ bool noaccel;
+ enum {
+ NOUVEAU_CARD_INIT_DOWN,
+ NOUVEAU_CARD_INIT_DONE,
+--- a/drivers/gpu/drm/nouveau/nouveau_state.c
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -435,7 +435,7 @@ nouveau_card_init(struct drm_device *dev)
+ if (ret)
+ goto out_timer;
+
+- if (nouveau_noaccel)
++ if (dev_priv->noaccel)
+ engine->graph.accel_blocked = true;
+ else {
+ /* PGRAPH */
+@@ -491,10 +491,10 @@ out_display:
+ out_irq:
+ drm_irq_uninstall(dev);
+ out_fifo:
+- if (!nouveau_noaccel)
++ if (!dev_priv->noaccel)
+ engine->fifo.takedown(dev);
+ out_graph:
+- if (!nouveau_noaccel)
++ if (!dev_priv->noaccel)
+ engine->graph.takedown(dev);
+ out_fb:
+ engine->fb.takedown(dev);
+@@ -532,7 +532,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
+ dev_priv->channel = NULL;
+ }
+
+- if (!nouveau_noaccel) {
++ if (!dev_priv->noaccel) {
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ }
+@@ -691,6 +691,21 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
+ NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ dev_priv->card_type, reg0);
+
++ if (nouveau_noaccel == -1) {
++ switch (dev_priv->chipset) {
++ case 0xa3:
++ case 0xa5:
++ case 0xa8:
++ dev_priv->noaccel = true;
++ break;
++ default:
++ dev_priv->noaccel = false;
++ break;
++ }
++ } else {
++ dev_priv->noaccel = (nouveau_noaccel != 0);
++ }
++
+ /* map larger RAMIN aperture on NV40 cards */
+ dev_priv->ramin = NULL;
+ if (dev_priv->card_type >= NV_40) {
+--
+1.7.2
+
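The patch above uses a tri-state module parameter: the default of -1 means
"choose per chipset", 0 forces acceleration on, and any other value forces
it off.  A small standalone C sketch of that resolution logic (the chipset
IDs come from the patch; everything else is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* -1 = auto-detect per chipset, 0 = force acceleration on, >0 = force off. */
static bool resolve_noaccel(int noaccel_param, unsigned chipset)
{
	if (noaccel_param != -1)
		return noaccel_param != 0;	/* explicit user override */
	switch (chipset) {
	case 0xa3: case 0xa5: case 0xa8:	/* known-problem chipsets */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("NVA3, auto:   noaccel=%d\n", resolve_noaccel(-1, 0xa3));
	printf("NVA3, forced: noaccel=%d\n", resolve_noaccel(0, 0xa3));
	printf("NV50, auto:   noaccel=%d\n", resolve_noaccel(-1, 0x50));
	return 0;
}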
diff --git a/freed-ora/current/F-12/drm-nouveau-safetile-getparam.patch b/freed-ora/current/F-12/drm-nouveau-safetile-getparam.patch
new file mode 100644
index 000000000..09f9734d4
--- /dev/null
+++ b/freed-ora/current/F-12/drm-nouveau-safetile-getparam.patch
@@ -0,0 +1,26 @@
+From 2aa78d8442bd947637ed81da00fd9c22232d2ed0 Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Wed, 9 Sep 2009 16:16:44 +1000
+Subject: [PATCH 5/6] f12: add getparam to know scanout tile_flags is safe
+
+---
+ drivers/gpu/drm/nouveau/nouveau_state.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
+index 36f8268..27c4d48 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_state.c
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -861,6 +861,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
+
+ getparam->value = dev_priv->vm_vram_base;
+ break;
++ case 0xdeadcafe00000003: /* NOUVEAU_GETPARAM_SCANOUT_TILEFLAGS */
++ getparam->value = 1;
++ break;
+ case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ /* NV40 and NV50 versions are quite different, but register
+ * address is the same. User is supposed to know the card
+--
+1.6.5.2
+
diff --git a/freed-ora/current/F-12/drm-nouveau-tvout-disable.patch b/freed-ora/current/F-12/drm-nouveau-tvout-disable.patch
new file mode 100644
index 000000000..b04de64a7
--- /dev/null
+++ b/freed-ora/current/F-12/drm-nouveau-tvout-disable.patch
@@ -0,0 +1,57 @@
+From 1c48b3294c0efe6b28ef84139eb838725bb34ccc Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Tue, 8 Sep 2009 13:57:50 +1000
+Subject: [PATCH 4/6] drm/nouveau: disable tv-out by default for the moment
+
+---
+ drivers/gpu/drm/nouveau/nouveau_drv.c | 4 ++++
+ drivers/gpu/drm/nouveau/nouveau_drv.h | 1 +
+ drivers/gpu/drm/nouveau/nv04_display.c | 5 +++++
+ 3 files changed, 10 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
+index 06eb993..d117ab5 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
+@@ -71,6 +71,10 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
+ int nouveau_nofbaccel = 0;
+ module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
++MODULE_PARM_DESC(tv, "Enable TV-out support (<GeForce 8)");
++int nouveau_tv = 0;
++module_param_named(tv, nouveau_tv, int, 0400);
++
+ MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+ "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+ "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index bf9acc6..0941725 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -648,6 +648,7 @@ extern int nouveau_uscript_tmds;
+ extern int nouveau_vram_pushbuf;
+ extern int nouveau_vram_notify;
+ extern int nouveau_fbpercrtc;
++extern int nouveau_tv;
+ extern char *nouveau_tv_norm;
+ extern int nouveau_reg_debug;
+ extern char *nouveau_vbios;
+diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
+index ef77215..dfae228 100644
+--- a/drivers/gpu/drm/nouveau/nv04_display.c
++++ b/drivers/gpu/drm/nouveau/nv04_display.c
+@@ -142,6 +142,11 @@ nv04_display_create(struct drm_device *dev)
+ ret = nv04_dfp_create(dev, dcbent);
+ break;
+ case OUTPUT_TV:
++ if (!nouveau_tv) {
++ NV_INFO(dev, "Enable TV-Out with tv module option\n");
++ continue;
++ }
++
+ if (dcbent->location == DCB_LOC_ON_CHIP)
+ ret = nv17_tv_create(dev, dcbent);
+ else
+--
+1.6.5.2
+
diff --git a/freed-ora/current/F-12/drm-nouveau-update.patch b/freed-ora/current/F-12/drm-nouveau-update.patch
new file mode 100644
index 000000000..6eaa1e10f
--- /dev/null
+++ b/freed-ora/current/F-12/drm-nouveau-update.patch
@@ -0,0 +1,306 @@
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index 5445cef..1c15ef3 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -583,6 +583,7 @@ struct drm_nouveau_private {
+ uint64_t vm_end;
+ struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
+ int vm_vram_pt_nr;
++ uint64_t vram_sys_base;
+
+ /* the mtrr covering the FB */
+ int fb_mtrr;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
+index 8f3a12f..2dc09db 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
+@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
+ uint32_t flags, uint64_t phys)
+ {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+- struct nouveau_gpuobj **pgt;
+- unsigned psz, pfl, pages;
+-
+- if (virt >= dev_priv->vm_gart_base &&
+- (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
+- psz = 12;
+- pgt = &dev_priv->gart_info.sg_ctxdma;
+- pfl = 0x21;
+- virt -= dev_priv->vm_gart_base;
+- } else
+- if (virt >= dev_priv->vm_vram_base &&
+- (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
+- psz = 16;
+- pgt = dev_priv->vm_vram_pt;
+- pfl = 0x01;
+- virt -= dev_priv->vm_vram_base;
+- } else {
+- NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
+- virt, virt + size - 1);
+- return -EINVAL;
+- }
++ struct nouveau_gpuobj *pgt;
++ unsigned block;
++ int i;
+
+- pages = size >> psz;
++ virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
++ size = (size >> 16) << 1;
++
++ phys |= ((uint64_t)flags << 32);
++ phys |= 1;
++ if (dev_priv->vram_sys_base) {
++ phys += dev_priv->vram_sys_base;
++ phys |= 0x30;
++ }
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+- if (flags & 0x80000000) {
+- while (pages--) {
+- struct nouveau_gpuobj *pt = pgt[virt >> 29];
+- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
++ while (size) {
++ unsigned offset_h = upper_32_bits(phys);
++ unsigned offset_l = lower_32_bits(phys);
++ unsigned pte, end;
++
++ for (i = 7; i >= 0; i--) {
++ block = 1 << (i + 1);
++ if (size >= block && !(virt & (block - 1)))
++ break;
++ }
++ offset_l |= (i << 7);
+
+- nv_wo32(dev, pt, pte++, 0x00000000);
+- nv_wo32(dev, pt, pte++, 0x00000000);
++ phys += block << 15;
++ size -= block;
+
+- virt += (1 << psz);
+- }
+- } else {
+- while (pages--) {
+- struct nouveau_gpuobj *pt = pgt[virt >> 29];
+- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+- unsigned offset_h = upper_32_bits(phys) & 0xff;
+- unsigned offset_l = lower_32_bits(phys);
++ while (block) {
++ pgt = dev_priv->vm_vram_pt[virt >> 14];
++ pte = virt & 0x3ffe;
+
+- nv_wo32(dev, pt, pte++, offset_l | pfl);
+- nv_wo32(dev, pt, pte++, offset_h | flags);
++ end = pte + block;
++ if (end > 16384)
++ end = 16384;
++ block -= (end - pte);
++ virt += (end - pte);
+
+- phys += (1 << psz);
+- virt += (1 << psz);
++ while (pte < end) {
++ nv_wo32(dev, pgt, pte++, offset_l);
++ nv_wo32(dev, pgt, pte++, offset_h);
++ }
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
+ void
+ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+ {
+- nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_gpuobj *pgt;
++ unsigned pages, pte, end;
++
++ virt -= dev_priv->vm_vram_base;
++ pages = (size >> 16) << 1;
++
++ dev_priv->engine.instmem.prepare_access(dev, true);
++ while (pages) {
++ pgt = dev_priv->vm_vram_pt[virt >> 29];
++ pte = (virt & 0x1ffe0000ULL) >> 15;
++
++ end = pte + pages;
++ if (end > 16384)
++ end = 16384;
++ pages -= (end - pte);
++ virt += (end - pte) << 15;
++
++ while (pte < end)
++ nv_wo32(dev, pgt, pte++, 0);
++ }
++ dev_priv->engine.instmem.finish_access(dev);
++
++ nv_wr32(dev, 0x100c80, 0x00050001);
++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++ return;
++ }
++
++ nv_wr32(dev, 0x100c80, 0x00000001);
++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++ }
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
+index d0e038d..1d73b15 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dac.c
++++ b/drivers/gpu/drm/nouveau/nv04_dac.c
+@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+ {
+ struct drm_device *dev = encoder->dev;
+- uint8_t saved_seq1, saved_pi, saved_rpc1;
++ uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
+ uint8_t saved_palette0[3], saved_palette_mask;
+ uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
+ int i;
+@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
+ /* only implemented for head A for now */
+ NVSetOwner(dev, 0);
+
++ saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
++ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
++
+ saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
+
+@@ -203,6 +206,7 @@ out:
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
++ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
+
+ if (blue == 0x18) {
+ NV_INFO(dev, "Load detected on head A\n");
+diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
+index 94400f7..f0dc4e3 100644
+--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
++++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
+@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
+ for (i = 0x1700; i <= 0x1710; i += 4)
+ priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+
++ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
++ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
++ else
++ dev_priv->vram_sys_base = 0;
++
+ /* Reserve the last MiB of VRAM, we should probably try to avoid
+ * setting up the below tables over the top of the VBIOS image at
+ * some point.
+@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
+ * We map the entire fake channel into the start of the PRAMIN BAR
+ */
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
+- 0, &priv->pramin_pt);
++ 0, &priv->pramin_pt);
+ if (ret)
+ return ret;
+
+- for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
+- if (v < (c_offset + c_size))
+- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
+- else
+- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
++ v = c_offset | 1;
++ if (dev_priv->vram_sys_base) {
++ v += dev_priv->vram_sys_base;
++ v |= 0x30;
++ }
++
++ i = 0;
++ while (v < dev_priv->vram_sys_base + c_offset + c_size) {
++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
++ v += 0x1000;
++ i += 8;
++ }
++
++ while (i < pt_size) {
++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
++ i += 8;
+ }
+
+ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
+@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+ {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+- uint32_t pte, pte_end, vram;
++ struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
++ uint32_t pte, pte_end;
++ uint64_t vram;
+
+ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+ NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
+ gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+- pte = (gpuobj->im_pramin->start >> 12) << 3;
+- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
++ pte = (gpuobj->im_pramin->start >> 12) << 1;
++ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
+ vram = gpuobj->im_backing_start;
+
+ NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
+ gpuobj->im_pramin->start, pte, pte_end);
+ NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+
++ vram |= 1;
++ if (dev_priv->vram_sys_base) {
++ vram += dev_priv->vram_sys_base;
++ vram |= 0x30;
++ }
++
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pte < pte_end) {
+- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
+- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+-
+- pte += 8;
++ nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
++ nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
+ vram += NV50_INSTMEM_PAGE_SIZE;
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+- pte = (gpuobj->im_pramin->start >> 12) << 3;
+- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
++ pte = (gpuobj->im_pramin->start >> 12) << 1;
++ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pte < pte_end) {
+- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
+- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+- pte += 8;
++ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
++ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index bcf843f..71247da 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -3726,7 +3726,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
+ }
+ table = &bios->data[bios->display.dp_table_ptr];
+
+- if (table[0] != 0x21) {
++ if (table[0] != 0x20 && table[0] != 0x21) {
+ NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
+ table[0]);
+ return NULL;
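The nv50 memory-management rework above packs each page-table entry as a
64-bit value: the low word carries the physical address plus a present bit,
the high word carries the caller's flags, and IGP chipsets (where
vram_sys_base is non-zero) additionally get the system-memory offset and
the 0x30 target bits.  The field meanings below are inferred from the patch
itself, not from hardware documentation; this standalone sketch shows the
packing only:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_pte(uint64_t phys, uint32_t flags, uint64_t vram_sys_base)
{
	uint64_t pte = phys | ((uint64_t)flags << 32) | 1;	/* bit 0: present */

	if (vram_sys_base) {
		pte += vram_sys_base;	/* "VRAM" is carved out of system RAM */
		pte |= 0x30;		/* route the access to system memory */
	}
	return pte;
}

int main(void)
{
	uint64_t pte = pack_pte(0x40000000ULL, 0x7, 0);

	printf("low word  = 0x%08x\n", (unsigned)(pte & 0xffffffffu));
	printf("high word = 0x%08x\n", (unsigned)(pte >> 32));
	return 0;
}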
diff --git a/freed-ora/current/F-12/drm-radeon-pm.patch b/freed-ora/current/F-12/drm-radeon-pm.patch
new file mode 100644
index 000000000..df12ec8a8
--- /dev/null
+++ b/freed-ora/current/F-12/drm-radeon-pm.patch
@@ -0,0 +1,586 @@
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/atombios_crtc.c.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/atombios_crtc.c
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/atombios_crtc.c.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/atombios_crtc.c 2009-03-03 20:53:05.000000000 +0000
+@@ -441,14 +441,23 @@ static bool atombios_crtc_mode_fixup(str
+
+ static void atombios_crtc_prepare(struct drm_crtc *crtc)
+ {
++ struct drm_device *dev = crtc->dev;
++ struct drm_radeon_private *dev_priv = dev->dev_private;
++
++ mutex_lock(&dev_priv->mode_info.power.pll_mutex);
++
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ atombios_lock_crtc(crtc, 1);
+ }
+
+ static void atombios_crtc_commit(struct drm_crtc *crtc)
+ {
++ struct drm_device *dev = crtc->dev;
++ struct drm_radeon_private *dev_priv = dev->dev_private;
++
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ atombios_lock_crtc(crtc, 0);
++ mutex_unlock(&dev_priv->mode_info.power.pll_mutex);
+ }
+
+ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_atombios.c.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_atombios.c
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_atombios.c.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_atombios.c 2009-03-03 20:53:05.000000000 +0000
+@@ -620,6 +620,34 @@ void radeon_atom_static_pwrmgt_setup(str
+ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+
++void radeon_atom_get_mc_arb_info(struct drm_device *dev)
++{
++ struct drm_radeon_private *dev_priv = dev->dev_private;
++ struct radeon_mode_info *mode_info = &dev_priv->mode_info;
++ struct atom_context *ctx = mode_info->atom_context;
++ int index = GetIndexIntoMasterTable(DATA, MC_InitParameter);
++ uint8_t frev, crev;
++ uint16_t size, data_offset;
++
++ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
++ dev_priv->mode_info.power.mc_arb_init_values =
++ kmalloc(size*sizeof(int), GFP_KERNEL);
++ memcpy(dev_priv->mode_info.power.mc_arb_init_values,
++ ctx->bios + data_offset, size * sizeof(int));
++}
++
++void radeon_atom_get_engine_clock(struct drm_device *dev, int *engine_clock)
++{
++ struct drm_radeon_private *dev_priv = dev->dev_private;
++ struct radeon_mode_info *mode_info = &dev_priv->mode_info;
++ struct atom_context *ctx = mode_info->atom_context;
++ GET_ENGINE_CLOCK_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
++
++ atom_execute_table(ctx, index, (uint32_t *)&args);
++ *engine_clock = args.ulReturnEngineClock;
++}
++
+ void radeon_atom_set_engine_clock(struct drm_device *dev, int eng_clock)
+ {
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+@@ -633,6 +661,18 @@ void radeon_atom_set_engine_clock(struct
+ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+
++void radeon_atom_get_memory_clock(struct drm_device *dev, int *mem_clock)
++{
++ struct drm_radeon_private *dev_priv = dev->dev_private;
++ struct radeon_mode_info *mode_info = &dev_priv->mode_info;
++ struct atom_context *ctx = mode_info->atom_context;
++ GET_MEMORY_CLOCK_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
++
++ atom_execute_table(ctx, index, (uint32_t *)&args);
++ *mem_clock = args.ulReturnMemoryClock;
++}
++
+ void radeon_atom_set_memory_clock(struct drm_device *dev, int mem_clock)
+ {
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+@@ -646,6 +686,16 @@ void radeon_atom_set_memory_clock(struct
+ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+
++void radeon_atom_initialize_memory_controller(struct drm_device *dev)
++{
++ struct drm_radeon_private *dev_priv = dev->dev_private;
++ struct atom_context *ctx = dev_priv->mode_info.atom_context;
++ int index = GetIndexIntoMasterTable(COMMAND, MemoryDeviceInit);
++ MEMORY_PLLINIT_PS_ALLOCATION args;
++
++ atom_execute_table(ctx, index, (uint32_t *)&args);
++}
++
+ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+ {
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cp.c.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cp.c
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cp.c.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cp.c 2009-03-03 20:53:05.000000000 +0000
+@@ -3223,6 +3223,8 @@ int radeon_driver_load(struct drm_device
+ if (ret)
+ goto modeset_fail;
+
++ mutex_init(&dev_priv->mode_info.power.pll_mutex);
++
+ radeon_modeset_init(dev);
+
+ radeon_modeset_cp_init(dev);
+@@ -3231,7 +3233,7 @@ int radeon_driver_load(struct drm_device
+ drm_irq_install(dev);
+ }
+
+-
++ radeon_pm_init(dev);
+ return ret;
+ modeset_fail:
+ dev->driver->driver_features &= ~DRIVER_MODESET;
+@@ -3303,6 +3305,8 @@ int radeon_driver_unload(struct drm_devi
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
++ radeon_pm_exit(dev);
++
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_irq_uninstall(dev);
+ radeon_modeset_cleanup(dev);
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cs.c.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cs.c
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cs.c.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_cs.c 2009-03-03 20:53:05.000000000 +0000
+@@ -41,6 +41,8 @@ int radeon_cs_ioctl(struct drm_device *d
+ long size;
+ int r, i;
+
++ radeon_pm_timer_reset(dev);
++
+ mutex_lock(&dev_priv->cs.cs_mutex);
+ /* set command stream id to 0 which is fake id */
+ cs_id = 0;
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_drv.h.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_drv.h
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_drv.h.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_drv.h 2009-03-03 20:53:05.000000000 +0000
+@@ -612,6 +612,9 @@ extern int radeon_modeset_cp_resume(stru
+ /* radeon_pm.c */
+ int radeon_suspend(struct drm_device *dev, pm_message_t state);
+ int radeon_resume(struct drm_device *dev);
++void radeon_pm_init(struct drm_device *dev);
++void radeon_pm_exit(struct drm_device *dev);
++void radeon_pm_timer_reset(struct drm_device *dev);
+
+ /* Flags for stats.boxes
+ */
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_irq.c.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_irq.c
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_irq.c.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_irq.c 2009-03-03 20:53:05.000000000 +0000
+@@ -185,8 +185,10 @@ irqreturn_t radeon_driver_irq_handler(DR
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
++ struct radeon_powermanagement_info *power = &dev_priv->mode_info.power;
+ u32 stat;
+ u32 r500_disp_int;
++ unsigned long flags;
+
+ /* Only consider the bits we're interested in - others could be used
+ * outside the DRM
+@@ -206,15 +208,47 @@ irqreturn_t radeon_driver_irq_handler(DR
+
+ /* VBLANK interrupt */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+- if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
++ if (r500_disp_int & R500_D1_VBLANK_INTERRUPT) {
++ spin_lock_irqsave(&power->power_lock, flags);
++ if (power->reclock_head & 1) {
++ power->reclock_head &= ~1;
++ schedule_work(&power->reclock_work);
++ drm_vblank_put(dev, 0);
++ }
++ spin_unlock_irqrestore(&power->power_lock, flags);
+ drm_handle_vblank(dev, 0);
+- if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
++ }
++ if (r500_disp_int & R500_D2_VBLANK_INTERRUPT) {
++ spin_lock_irqsave(&power->power_lock, flags);
++ if (power->reclock_head & 2) {
++ power->reclock_head &= ~2;
++ schedule_work(&power->reclock_work);
++ drm_vblank_put(dev, 1);
++ }
++ spin_unlock_irqrestore(&power->power_lock, flags);
+ drm_handle_vblank(dev, 1);
++ }
+ } else {
+- if (stat & RADEON_CRTC_VBLANK_STAT)
++ if (stat & RADEON_CRTC_VBLANK_STAT) {
++ spin_lock_irqsave(&power->power_lock, flags);
++ if (power->reclock_head & 1) {
++ power->reclock_head &= ~1;
++ schedule_work(&power->reclock_work);
++ drm_vblank_put(dev, 0);
++ }
++ spin_unlock_irqrestore(&power->power_lock, flags);
+ drm_handle_vblank(dev, 0);
+- if (stat & RADEON_CRTC2_VBLANK_STAT)
++ }
++ if (stat & RADEON_CRTC2_VBLANK_STAT) {
++ spin_lock_irqsave(&power->power_lock, flags);
++ if (power->reclock_head & 2) {
++ power->reclock_head &= ~2;
++ schedule_work(&power->reclock_work);
++ drm_vblank_put(dev, 1);
++ }
++ spin_unlock_irqrestore(&power->power_lock, flags);
+ drm_handle_vblank(dev, 1);
++ }
+ }
+ return IRQ_HANDLED;
+ }
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_mode.h.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_mode.h
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_mode.h.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_mode.h 2009-03-03 20:53:05.000000000 +0000
+@@ -173,6 +173,22 @@ struct radeon_i2c_chan {
+ struct radeon_i2c_bus_rec rec;
+ };
+
++struct radeon_powermanagement_info {
++ struct timer_list idle_power_timer;
++ struct work_struct reclock_work;
++ struct drm_device *dev;
++ uint32_t orig_memory_clock;
++ uint32_t orig_engine_clock;
++ uint32_t *mc_arb_init_values;
++ uint8_t orig_fbdiv;
++ int new_mem_clock;
++ int new_engine_clock;
++ int current_clock_state;
++ int reclock_head;
++ struct mutex pll_mutex;
++ spinlock_t power_lock;
++};
++
+ struct radeon_mode_info {
+ struct atom_context *atom_context;
+ struct radeon_bios_connector bios_connector[RADEON_MAX_BIOS_CONNECTOR];
+@@ -182,6 +198,9 @@ struct radeon_mode_info {
+ struct radeon_pll mpll;
+ uint32_t mclk;
+ uint32_t sclk;
++
++ /* power management */
++ struct radeon_powermanagement_info power;
+ };
+
+ struct radeon_crtc {
+@@ -307,6 +326,12 @@ extern int radeon_crtc_cursor_move(struc
+
+ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+ extern bool radeon_combios_get_clock_info(struct drm_device *dev);
++extern void radeon_atom_get_engine_clock(struct drm_device *dev, int *engine_clock);
++extern void radeon_atom_get_memory_clock(struct drm_device *dev, int *memory_clock);
++extern void radeon_atom_set_engine_clock(struct drm_device *dev, int engine_clock);
++extern void radeon_atom_set_memory_clock(struct drm_device *dev, int memory_clock);
++extern void radeon_atom_initialize_memory_controller(struct drm_device *dev);
++extern void radeon_atom_get_mc_arb_info(struct drm_device *dev);
+ extern void radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+ extern void radeon_atombios_get_tmds_info(struct radeon_encoder *encoder);
+ extern bool radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_pm.c.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_pm.c
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_pm.c.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_pm.c 2009-03-03 20:53:05.000000000 +0000
+@@ -31,6 +31,8 @@
+
+ #include "drm_crtc_helper.h"
+
++#define RADEON_DOWNCLOCK_IDLE_MS 30
++
+ int radeon_suspend(struct drm_device *dev, pm_message_t state)
+ {
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+@@ -255,3 +257,214 @@ bool radeon_set_pcie_lanes(struct drm_de
+ return false;
+ }
+
++static void radeon_pm_set_engine_clock(struct drm_device *dev, int freq)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++
++ if (dev_priv->is_atom_bios)
++ radeon_atom_set_engine_clock(dev, freq);
++}
++
++static void radeon_pm_set_memory_clock(struct drm_device *dev, int freq)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++ struct radeon_powermanagement_info *power = &dev_priv->mode_info.power;
++
++ mutex_lock(&power->pll_mutex);
++ radeon_do_cp_idle(dev_priv);
++ if (dev_priv->is_atom_bios) {
++ int mpll, spll, hclk, sclk, fbdiv, index, factor;
++ switch (dev_priv->chip_family) {
++ case CHIP_R520:
++ case CHIP_RV530:
++ case CHIP_RV560:
++ case CHIP_RV570:
++ case CHIP_R580:
++ mpll = RADEON_READ_PLL(dev_priv, MPLL_FUNC_CNTL);
++ fbdiv = (mpll & 0x1fe0) >> 5;
++
++ /* Set new fbdiv */
++ factor = power->orig_memory_clock / freq;
++ fbdiv = power->orig_fbdiv / factor;
++
++ mpll &= ~0x1fe0;
++ mpll |= ((fbdiv << 5) | (1 << 24));
++ mpll &= ~(1 << 25);
++
++ spll = RADEON_READ_PLL(dev_priv, SPLL_FUNC_CNTL);
++
++ hclk = fbdiv << 5;
++ hclk += 0x20;
++ hclk *= 8;
++
++ sclk = spll & 0x1fe0;
++ sclk += 0x20;
++ sclk *= 6;
++ sclk = sclk >> 5;
++
++ index = (hclk/sclk);
++
++ R500_WRITE_MCIND(R530_MC_ARB_RATIO_CLK_SEQ,
++ power->mc_arb_init_values[index]);
++ RADEON_WRITE_PLL(dev_priv, MPLL_FUNC_CNTL, mpll);
++ radeon_atom_initialize_memory_controller(dev);
++ break;
++ }
++ }
++
++ mutex_unlock(&power->pll_mutex);
++}
++
++static int radeon_pm_get_active_crtcs(struct drm_device *dev, int *crtcs)
++{
++ struct drm_crtc *crtc;
++ int count = 0;
++ struct radeon_crtc *radeon_crtc;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ radeon_crtc = to_radeon_crtc(crtc);
++ if (crtc->enabled) {
++ count++;
++ *crtcs |= (1 << radeon_crtc->crtc_id);
++ }
++ }
++ return count;
++}
++
++
++static void radeon_pm_perform_transition(struct drm_device *dev)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++ struct radeon_powermanagement_info *power = &dev_priv->mode_info.power;
++ int crtcs = 0, count;
++ unsigned long flags;
++
++ count = radeon_pm_get_active_crtcs(dev, &crtcs);
++
++ spin_lock_irqsave(&power->power_lock, flags);
++ switch (count) {
++ case 0:
++ schedule_work(&power->reclock_work);
++ break;
++ case 1:
++ if (power->reclock_head)
++ break;
++ if (crtcs & 1) {
++ power->reclock_head |= 1;
++ drm_vblank_get(dev, 0);
++ } else {
++ power->reclock_head |= 2;
++ drm_vblank_get(dev, 1);
++ }
++ break;
++ default:
++ /* Too many active heads */
++ break;
++ }
++ spin_unlock_irqrestore(&power->power_lock, flags);
++}
++
++
++static int radeon_pm_set_runtime_power(struct drm_device *dev, int value)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++ struct radeon_powermanagement_info *power = &dev_priv->mode_info.power;
++
++ if (power->current_clock_state == value)
++ return 1;
++
++ switch (value) {
++ case 0:
++ power->new_engine_clock = 100*100;
++ power->new_mem_clock = 100*100;
++ break;
++ case 1:
++ power->new_engine_clock = power->orig_engine_clock;
++ power->new_mem_clock = power->orig_memory_clock;
++ break;
++ }
++
++ power->current_clock_state = value;
++ radeon_pm_perform_transition(dev);
++
++ return 0;
++}
++
++static void radeon_pm_idle_timeout(unsigned long d)
++{
++ struct drm_device *dev = (struct drm_device *)d;
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++
++ radeon_pm_set_runtime_power(dev, 0);
++}
++
++static void radeon_pm_reclock_callback(struct work_struct *work)
++{
++ struct radeon_powermanagement_info *power =
++ container_of(work, struct radeon_powermanagement_info,
++ reclock_work);
++ struct drm_device *dev = power->dev;
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++
++ mutex_lock(&dev_priv->cs.cs_mutex);
++ radeon_pm_set_memory_clock(dev, power->new_mem_clock);
++ radeon_pm_set_engine_clock(dev, power->new_engine_clock);
++ mutex_unlock(&dev_priv->cs.cs_mutex);
++}
++
++void radeon_pm_timer_reset(struct drm_device *dev)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++ struct radeon_powermanagement_info *power = &dev_priv->mode_info.power;
++
++ if (!drm_core_check_feature(dev, DRIVER_MODESET))
++ return;
++
++ radeon_pm_set_runtime_power(dev, 1);
++
++ mod_timer(&power->idle_power_timer,
++ jiffies + msecs_to_jiffies(RADEON_DOWNCLOCK_IDLE_MS));
++}
++
++void radeon_pm_init(struct drm_device *dev)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++ struct radeon_powermanagement_info *power = &dev_priv->mode_info.power;
++
++ power->dev = dev;
++
++ if (!drm_core_check_feature(dev, DRIVER_MODESET))
++ return;
++
++ if (dev_priv->is_atom_bios) {
++ int mpll;
++ radeon_atom_get_mc_arb_info(dev);
++ radeon_atom_get_engine_clock(dev, &power->orig_engine_clock);
++ radeon_atom_get_memory_clock(dev, &power->orig_memory_clock);
++
++ mpll = RADEON_READ_PLL(dev_priv, MPLL_FUNC_CNTL);
++ dev_priv->mode_info.power.orig_fbdiv = (mpll & 0x1fe0) >> 5;
++ }
++
++ setup_timer(&power->idle_power_timer, radeon_pm_idle_timeout,
++ (unsigned long)dev);
++ INIT_WORK(&power->reclock_work, radeon_pm_reclock_callback);
++
++ spin_lock_init(&power->power_lock);
++
++ power->current_clock_state = 1;
++ power->reclock_head = 0;
++
++ radeon_pm_timer_reset(dev);
++}
++
++void radeon_pm_exit(struct drm_device *dev)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++ struct radeon_powermanagement_info *power = &dev_priv->mode_info.power;
++
++ if (!drm_core_check_feature(dev, DRIVER_MODESET))
++ return;
++
++ del_timer_sync(&power->idle_power_timer);
++}
+diff -up linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_reg.h.mjg linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_reg.h
+--- linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_reg.h.mjg 2009-03-03 19:41:48.000000000 +0000
++++ linux-2.6.28.x86_64/drivers/gpu/drm/radeon/radeon_reg.h 2009-03-03 20:53:05.000000000 +0000
+@@ -303,6 +303,28 @@
+ # define RADEON_PLL_WR_EN (1 << 7)
+ # define RADEON_PLL_DIV_SEL (3 << 8)
+ # define RADEON_PLL2_DIV_SEL_MASK ~(3 << 8)
++#define SPLL_FUNC_CNTL 0x0000
++#define MPLL_FUNC_CNTL 0x0004
++#define GENERAL_PWRMGT 0x0008
++# define RADEON_GLOBAL_PWRMGT_EN (1 << 0)
++#define SCLK_PWRMGT_CNTL 0x0009
++# define RADEON_SCLK_PWRMGT_OFF (1 << 0)
++#define MCLK_PWRMGT_CNTL 0x000a
++# define RADEON_MCLK_PWRMGT_OFF (1 << 0)
++#define DYN_PWRMGT_SCLK_CNTL 0x000b
++# define RADEON_ENGINE_DYNCLK_MODE (1 << 0)
++# define RADEON_STATIC_SCREEN_EN (1 << 20)
++# define RADEON_CLIENT_SELECT_POWER_EN (1 << 21)
++#define DYN_SCLK_PWMEN_PIPE 0x000d
++# define RADEON_PIPE_3D_NOT_AUTO (1 << 8)
++#define DYN_SCLK_VOL_CNTL 0x000e
++# define RADEON_IO_CG_VOLTAGE_DROP (1 << 0)
++# define RADEON_VOLTAGE_DROP_SYNC (1 << 2)
++#define CP_DYN_CNTL 0x000f
++# define RADEON_CP_FORCEON (1 << 0)
++# define RADEON_CP_LOWER_POWER_IGNORE (1 << 20)
++# define RADEON_CP_NORMAL_POWER_IGNORE (1 << 21)
++# define RADEON_CP_NORMAL_POWER_BUSY (1 << 24)
+ #define RADEON_CLK_PWRMGT_CNTL 0x0014
+ # define RADEON_ENGIN_DYNCLK_MODE (1 << 12)
+ # define RADEON_ACTIVE_HILO_LAT_MASK (3 << 13)
+@@ -3961,7 +3983,48 @@
+ # define AVIVO_I2C_RESET (1 << 8)
+
+ #define R600_GENERAL_PWRMGT 0x618
++# define R600_GLOBAL_PWRMGT_EN (1 << 0)
++# define R600_STATIC_PM_EN (1 << 1)
++# define R600_MOBILE_SU (1 << 2)
++# define R600_THERMAL_PROTECTION_DIS (1 << 3)
++# define R600_THERMAL_PROTECTION_TYPE (1 << 4)
++# define R600_ENABLE_GEN2PCIE (1 << 5)
++# define R600_SW_GPIO_INDEX (1 << 6)
++# define R600_LOW_VOLT_D2_ACPI (1 << 8)
++# define R600_LOW_VOLT_D3_ACPI (1 << 9)
++# define R600_VOLT_PWRMGT_EN (1 << 10)
+ # define R600_OPEN_DRAIN_PADS (1 << 11)
++# define R600_AVP_SCLK_EN (1 << 12)
++# define R600_IDCT_SCLK_EN (1 << 13)
++# define R600_GPU_COUNTER_ACPI (1 << 14)
++# define R600_COUNTER_CLK (1 << 15)
++# define R600_BACKBIAS_PAD_EN (1 << 16)
++# define R600_BACKBIAS_VALUE (1 << 17)
++# define R600_BACKBIAS_DPM_CNTL (1 << 18)
++# define R600_SPREAD_SPECTRUM_INDEX (1 << 19)
++# define R600_DYN_SPREAD_SPECTRUM_EN (1 << 21)
++
++#define R600_SCLK_PWRMGT_CNTL 0x620
++# define R600_SCLK_PWRMGT_OFF (1 << 0)
++# define R600_SCLK_TURNOFF (1 << 1)
++# define R600_SPLL_TURNOFF (1 << 2)
++# define R600_SU_SCLK_USE_BCLK (1 << 3)
++# define R600_DYNAMIC_GFX_ISLAND_PWR_DOWN (1 << 4)
++# define R600_DYNAMIC_GFX_ISLAND_LP (1 << 5)
++# define R600_CLK_TURN_ON_STAGGER (1 << 6)
++# define R600_CLK_TURN_OFF_STAGGER (1 << 7)
++# define R600_FIR_FORCE_TREND_SEL (1 << 8)
++# define R600_FIR_TREND_MODE (1 << 9)
++# define R600_DYN_GFX_CLK_OFF_EN (1 << 10)
++# define R600_VDDC3D_TURNOFF_D1 (1 << 11)
++# define R600_VDDC3D_TURNOFF_D2 (1 << 12)
++# define R600_VDDC3D_TURNOFF_D3 (1 << 13)
++# define R600_SPLL_TURNOFF_D2 (1 << 14)
++# define R600_SCLK_LOW_D1 (1 << 15)
++# define R600_DYN_GFX_CLK_OFF_MC_EN (1 << 16)
++
++#define R600_MCLK_PWRMGT_CNTL 0x624
++# define R600_MPLL_PWRMGT_OFF (1 << 0)
+
+ #define R600_LOWER_GPIO_ENABLE 0x710
+ #define R600_CTXSW_VID_LOWER_GPIO_CNTL 0x718
+@@ -5331,5 +5394,6 @@
+ # define R500_RS_IP_OFFSET_EN (1 << 31)
+
+ #define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
++#define R530_MC_ARB_RATIO_CLK_SEQ 0x0016 /* MC */
+
+ #endif
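The memory reclocking in radeon_pm_set_memory_clock() above derives the new
MPLL feedback divider by integer-scaling the original divider with the ratio
of original to requested memory clock.  A back-of-the-envelope sketch of
that arithmetic (the 10 kHz clock units and the sample values are
assumptions; only the formula is taken from the patch):

#include <stdio.h>

int main(void)
{
	int orig_memory_clock = 40000;	/* e.g. 400 MHz, assuming 10 kHz units */
	int orig_fbdiv = 0x60;		/* original MPLL feedback divider (made up) */
	int new_clock = 10000;		/* the patch's 100*100 low-power clock */

	int factor = orig_memory_clock / new_clock;	/* 4 */
	int fbdiv = orig_fbdiv / factor;		/* 0x18 */

	printf("factor=%d, new fbdiv=0x%x\n", factor, fbdiv);
	return 0;
}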
diff --git a/freed-ora/current/F-12/drm-upgrayed-fixes.patch b/freed-ora/current/F-12/drm-upgrayed-fixes.patch
new file mode 100644
index 000000000..e437dc21f
--- /dev/null
+++ b/freed-ora/current/F-12/drm-upgrayed-fixes.patch
@@ -0,0 +1,1092 @@
+commit 9b86cec953ee8ae21be2183f34b8c8d2fcf14ecb
+Author: Alex Deucher <alexdeucher@gmail.com>
+Date: Thu Feb 11 10:47:52 2010 -0500
+
+ drm/radeon/kms: remove HDP flushes from fence emit (v2)
+
+ r600_ioctl_wait_idle() now handles this.
+
+ v2: update blit fence counts
+
+ Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 817dbb17e5e8fcdfb7bb3d18f3ceb15078dd6050
+Author: Dave Airlie <airlied@redhat.com>
+Date: Wed Mar 3 13:22:20 2010 +1000
+
+ drm/radeon: Add asic hook for dma copy to r200 cards.
+
+ r200 cards have a dma engine which can be used to transfer data
+ between vram and system memory.
+
+ r300 dma engine registers match the r200 dma engine. Enabling
+ dma copy for r200 is as simple as hooking the r200 asic to the already
+ existing function r300_copy_dma.
+
+ Rename r300_copy_dma to r200_copy_dma to reflect that support
+ starts from r200 cards.
+
+ v2: Created a new asic object for r200 cards.
+
+ Signed-off-by: Pauli Nieminen <suokkos@gmail.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+ Conflicts:
+
+ drivers/gpu/drm/radeon/r300.c
+ drivers/gpu/drm/radeon/radeon_asic.h
+
+commit 30988e41d355e2011d1a340d6b889deeceeb447d
+Author: Pauli Nieminen <suokkos@gmail.com>
+Date: Thu Feb 11 17:55:35 2010 +0000
+
+ drm/radeon/kms: Create asic structure for r300 pcie cards.
+
+ Setting global asic structure to point to different function
+ would cause problem in system where is multiple r300 cards
+ with different bus type.
+
+ r300_asic_pcie is just copy from r300_asic with gart tlb
+ functions replaced with pcie versions.
+
+ Signed-off-by: Pauli Nieminen <suokkos@gmail.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 0faed138b285ef54c27a6c17016d8ff718c40113
+Author: Jesse Barnes <jbarnes@virtuousgeek.org>
+Date: Fri Feb 12 09:30:00 2010 -0800
+
+ drm/i915: give up on 8xx lid status
+
+ These old machines more often than not lie about their lid state. So
+ don't use it to detect LVDS presence, but leave the event handler to
+ deal with lid open/close, when we might need to reset the mode.
+
+ Fixes kernel bug #15248
+
+ Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+ Cc: stable@kernel.org
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+commit 19d2051901dc28cf96d2b29e5f5c5f6725e90105
+Author: Jerome Glisse <jglisse@redhat.com>
+Date: Fri Feb 26 19:14:12 2010 +0000
+
+ drm/radeon/kms: initialize set_surface_reg reg for rs600 asic
+
+ rs600 asic was missing set_surface_reg callback leading to
+ oops.
+
+ Signed-off-by: Jerome Glisse <jglisse@redhat.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 884d6cda747259e5ba56c00cdc8a5406a45209c9
+Author: Dave Airlie <airlied@redhat.com>
+Date: Thu Feb 11 14:28:58 2010 +1000
+
+ drm/kms: fix fb_changed = true else statement
+
+ a patch from Roel was wrong, fix this properly, really
+ if the fb ptrs are different fb changed shuold be true.
+
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit b6428034e4bc7c9d57d1a7394a4d77fc80737cf4
+Author: Alex Deucher <alexdeucher@gmail.com>
+Date: Tue Feb 23 21:56:12 2010 -0500
+
+ drm/radeon/kms/atom: fix shr/shl ops
+
+ The whole attribute table is valid for
+ shr/shl ops.
+
+ Fixes fdo bug 26668
+
+ Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+ Cc: stable@kernel.org
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 93e49a04b1118c8036ccba855c1b8b095fd6b51b
+Author: Maarten Maathuis <madman2003@gmail.com>
+Date: Sat Feb 20 03:22:21 2010 +0100
+
+ drm/ttm: handle OOM in ttm_tt_swapout
+
+ - Without this change I get a general protection fault.
+ - Also use PTR_ERR where applicable.
+
+ Signed-off-by: Maarten Maathuis <madman2003@gmail.com>
+ Reviewed-by: Dave Airlie <airlied@redhat.com>
+ Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
+ Cc: stable@kernel.org
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 36e437d37c849da2ad35d4ba3c0662d8a06f9f9f
+Author: Zhang Rui <rui.zhang@intel.com>
+Date: Tue Feb 16 04:16:55 2010 -0500
+
+ ACPI, i915: blacklist Clevo M5x0N bad_lid state
+
+ Wrong Lid state reported.
+ Need to blacklist this machine for LVDS detection.
+
+ Signed-off-by: Zhang Rui <rui.zhang@intel.com>
+ Signed-off-by: Len Brown <len.brown@intel.com>
+
+commit 5bf1252153707877ae62bef73fe00731972ef18b
+Author: Adam Jackson <ajax@redhat.com>
+Date: Mon Feb 15 22:15:39 2010 +0000
+
+ drm/edid: Fix interlaced detailed timings to be frame size, not field.
+
+ cf. https://bugzilla.redhat.com/show_bug.cgi?id=533561
+
+ Signed-off-by: Adam Jackson <ajax@redhat.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 268d51cd56bcf861bd668163c1b618c96d8b3701
+Author: Rafael J. Wysocki <rjw@sisk.pl>
+Date: Thu Feb 18 23:06:27 2010 +0100
+
+ i915 / PM: Fix hibernate regression caused by suspend/resume splitting
+
+ Commit 84b79f8d2882b0a84330c04839ed4d3cefd2ff77 (drm/i915: Fix crash
+ while aborting hibernation) attempted to fix a regression introduced
+ by commit cbda12d77ea590082edb6d30bd342a67ebc459e0 (drm/i915:
+ implement new pm ops for i915), but it went too far trying to split
+ the freeze/suspend and resume/thaw parts of the code. As a result,
+ it introduced another regression, which only is visible on some systems.
+
+ Fix the problem by merging i915_drm_suspend() with
+ i915_drm_freeze() and moving some code from i915_resume()
+ into i915_drm_thaw(), so that intel_opregion_free() and
+ intel_opregion_init() are also executed in the freeze and thaw code
+ paths, respectively.
+
+ Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+ Reported-and-tested-by: Pedro Ribeiro <pedrib@gmail.com>
+ Tested-by: Tino Keitel <tino.keitel@tikei.de>
+ Acked-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 8d205562d42d397086d829ba4b8f3911c3dca682
+Author: Francisco Jerez <currojerez@riseup.net>
+Date: Sat Feb 20 07:30:15 2010 +1000
+
+ drm/ttm: fix caching problem on non-PAT systems.
+
+ http://bugzilla.kernel.org/show_bug.cgi?id=15328
+
+ This fixes a serious regression on AGP/non-PAT systems, where
+ pages were ending up in the wrong state and slowing down the
+ whole system.
+
+ [airlied: taken this from the bug as the other option is to revert
+ the change which caused it].
+
+ Tested-by: John W. Linville (in bug).
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 7a2bb26f41ceb9b17805dcae93311004e03bd1d0
+Author: Dave Airlie <airlied@redhat.com>
+Date: Sat Feb 20 09:17:18 2010 +1000
+
+ drm/radeon: bump the UMS driver version number to indicate rv740 fix
+
+ This lets UMS userspace know the rv740 fix is in. For KMS we can
+ consider the kernel release to be the v2.0.0 release so we don't need the
+ bump there.
+
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 211dc7044003e3ed8c7a2e2b646bf6937b921960
+Author: Jerome Glisse <jglisse@redhat.com>
+Date: Thu Feb 18 13:13:29 2010 +0000
+
+ drm/radeon/kms: free fence IB if it wasn't emited at IB free time
+
+ If at IB free time the fence wasn't emitted, that means the IB wasn't
+ scheduled because an error occurred somewhere; thus we can free
+ the fence and mark the IB as free.
+
+ Signed-off-by: Jerome Glisse <jglisse@redhat.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit b2449d6d4de19459d616011fbed87aaa485f5e71
+Author: Alex Deucher <alexdeucher@gmail.com>
+Date: Fri Feb 19 16:07:02 2010 -0500
+
+ drm/radeon/rv740: fix backend setup
+
+ This patch fixes occlusion queries and rendering errors
+ on rv740 boards. Hardcoding the backend map is not an optimal
+ solution, but a better fix is being worked on.
+
+ Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit 79b5531abdad5c266e87a5c35975e5a92f4aba20
+Author: Alex Deucher <alexdeucher@gmail.com>
+Date: Fri Feb 19 02:13:56 2010 -0500
+
+ drm/radeon/kms: fix shared ddc detection
+
+ Just compare the i2c id since the i2c structs
+ may be slightly different.
+
+ Fixes fdo bug 26616.
+
+ Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+commit d2ffb93351a506acdfede450778964eb862e63b7
+Author: Alex Deucher <alexdeucher@gmail.com>
+Date: Thu Feb 18 14:14:58 2010 -0500
+
+ drm/radeon/kms/rs600: add connector quirk
+
+ rs600 board lists DVI port as HDMI.
+
+ Fixes fdo bug 26605
+
+ Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 7d0f00a..f2aaf39 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
+ mode_changed = true;
+ } else if (set->fb == NULL) {
+ mode_changed = true;
+- } else if ((set->fb->bits_per_pixel !=
+- set->crtc->fb->bits_per_pixel) ||
+- set->fb->depth != set->crtc->fb->depth)
+- fb_changed = true;
+- else
++ } else
+ fb_changed = true;
+ }
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index f41e91c..f97e7c4 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
+ return mode;
+ }
+
++/*
++ * EDID is delightfully ambiguous about how interlaced modes are to be
++ * encoded. Our internal representation is of frame height, but some
++ * HDTV detailed timings are encoded as field height.
++ *
++ * The format list here is from CEA, in frame size. Technically we
++ * should be checking refresh rate too. Whatever.
++ */
++static void
++drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
++ struct detailed_pixel_timing *pt)
++{
++ int i;
++ static const struct {
++ int w, h;
++ } cea_interlaced[] = {
++ { 1920, 1080 },
++ { 720, 480 },
++ { 1440, 480 },
++ { 2880, 480 },
++ { 720, 576 },
++ { 1440, 576 },
++ { 2880, 576 },
++ };
++ static const int n_sizes =
++ sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
++
++ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
++ return;
++
++ for (i = 0; i < n_sizes; i++) {
++ if ((mode->hdisplay == cea_interlaced[i].w) &&
++ (mode->vdisplay == cea_interlaced[i].h / 2)) {
++ mode->vdisplay *= 2;
++ mode->vsync_start *= 2;
++ mode->vsync_end *= 2;
++ mode->vtotal *= 2;
++ mode->vtotal |= 1;
++ }
++ }
++
++ mode->flags |= DRM_MODE_FLAG_INTERLACE;
++}
++
+ /**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+
+ drm_mode_set_name(mode);
+
+- if (pt->misc & DRM_EDID_PT_INTERLACED)
+- mode->flags |= DRM_MODE_FLAG_INTERLACE;
++ drm_mode_do_interlace_quirk(mode, pt);
+
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 79beffc..cf4cb3e 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
+
+ static int i915_drm_freeze(struct drm_device *dev)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
+ pci_save_state(dev->pdev);
+
+ /* If KMS is active, we do the leavevt stuff here */
+@@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev)
+
+ i915_save_state(dev);
+
+- return 0;
+-}
+-
+-static void i915_drm_suspend(struct drm_device *dev)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+-
+ intel_opregion_free(dev, 1);
+
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
++
++ return 0;
+ }
+
+ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+@@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ if (error)
+ return error;
+
+- i915_drm_suspend(dev);
+-
+ if (state.event == PM_EVENT_SUSPEND) {
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+@@ -237,6 +232,10 @@ static int i915_drm_thaw(struct drm_device *dev)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int error = 0;
+
++ i915_restore_state(dev);
++
++ intel_opregion_init(dev, 1);
++
+ /* KMS EnterVT equivalent */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+@@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev)
+
+ pci_set_master(dev->pdev);
+
+- i915_restore_state(dev);
+-
+- intel_opregion_init(dev, 1);
+-
+ return i915_drm_thaw(dev);
+ }
+
+@@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev)
+ if (error)
+ return error;
+
+- i915_drm_suspend(drm_dev);
+-
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+@@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+- int error;
+-
+- error = i915_drm_freeze(drm_dev);
+- if (!error)
+- i915_drm_suspend(drm_dev);
+
+- return error;
++ return i915_drm_freeze(drm_dev);
+ }
+
+ const struct dev_pm_ops i915_pm_ops = {
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index b1d0acb..93031a7 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+ },
+ },
++ {
++ .ident = "Clevo M5x0N",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++ DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
++ },
++ },
+ { }
+ };
+
+@@ -648,8 +655,15 @@ static const struct dmi_system_id bad_lid_status[] = {
+ */
+ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+ {
++ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status = connector_status_connected;
+
++ /* ACPI lid methods were generally unreliable in this generation, so
++ * don't even bother.
++ */
++ if (IS_I8XX(dev))
++ return connector_status_connected;
++
+ if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
+ status = connector_status_disconnected;
+
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index 592ce91..4362f82 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -882,8 +882,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+- attr &= 0x38;
+- attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src(ctx, attr, ptr);
+@@ -898,8 +896,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+- attr &= 0x38;
+- attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src(ctx, attr, ptr);
+diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
+index ff1e0cd..1146c99 100644
+--- a/drivers/gpu/drm/radeon/r200.c
++++ b/drivers/gpu/drm/radeon/r200.c
+@@ -31,6 +31,7 @@
+ #include "radeon_reg.h"
+ #include "radeon.h"
+
++#include "r100d.h"
+ #include "r200_reg_safe.h"
+
+ #include "r100_track.h"
+@@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
+ return vtx_size;
+ }
+
++int r200_copy_dma(struct radeon_device *rdev,
++ uint64_t src_offset,
++ uint64_t dst_offset,
++ unsigned num_pages,
++ struct radeon_fence *fence)
++{
++ uint32_t size;
++ uint32_t cur_size;
++ int i, num_loops;
++ int r = 0;
++
++ /* radeon pitch is /64 */
++ size = num_pages << PAGE_SHIFT;
++ num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
++ r = radeon_ring_lock(rdev, num_loops * 4 + 64);
++ if (r) {
++ DRM_ERROR("radeon: moving bo (%d).\n", r);
++ return r;
++ }
++ /* Must wait for 2D idle & clean before DMA or hangs might happen */
++ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
++ radeon_ring_write(rdev, (1 << 16));
++ for (i = 0; i < num_loops; i++) {
++ cur_size = size;
++ if (cur_size > 0x1FFFFF) {
++ cur_size = 0x1FFFFF;
++ }
++ size -= cur_size;
++ radeon_ring_write(rdev, PACKET0(0x720, 2));
++ radeon_ring_write(rdev, src_offset);
++ radeon_ring_write(rdev, dst_offset);
++ radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
++ src_offset += cur_size;
++ dst_offset += cur_size;
++ }
++ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
++ radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
++ if (fence) {
++ r = radeon_fence_emit(rdev, fence);
++ }
++ radeon_ring_unlock_commit(rdev);
++ return r;
++}
++
++
+ static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
+ {
+ int vtx_size, i, tex_size;
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index 43b55a0..6516cd6 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -198,50 +198,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
+ radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+ }
+
+-int r300_copy_dma(struct radeon_device *rdev,
+- uint64_t src_offset,
+- uint64_t dst_offset,
+- unsigned num_pages,
+- struct radeon_fence *fence)
+-{
+- uint32_t size;
+- uint32_t cur_size;
+- int i, num_loops;
+- int r = 0;
+-
+- /* radeon pitch is /64 */
+- size = num_pages << PAGE_SHIFT;
+- num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+- r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+- if (r) {
+- DRM_ERROR("radeon: moving bo (%d).\n", r);
+- return r;
+- }
+- /* Must wait for 2D idle & clean before DMA or hangs might happen */
+- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
+- radeon_ring_write(rdev, (1 << 16));
+- for (i = 0; i < num_loops; i++) {
+- cur_size = size;
+- if (cur_size > 0x1FFFFF) {
+- cur_size = 0x1FFFFF;
+- }
+- size -= cur_size;
+- radeon_ring_write(rdev, PACKET0(0x720, 2));
+- radeon_ring_write(rdev, src_offset);
+- radeon_ring_write(rdev, dst_offset);
+- radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+- src_offset += cur_size;
+- dst_offset += cur_size;
+- }
+- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+- radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+- if (fence) {
+- r = radeon_fence_emit(rdev, fence);
+- }
+- radeon_ring_unlock_commit(rdev);
+- return r;
+-}
+-
+ void r300_ring_start(struct radeon_device *rdev)
+ {
+ unsigned gb_tile_config;
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 91f5af9..526b3ec 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -1797,8 +1797,6 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(rdev, fence->seq);
+- radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+- radeon_ring_write(rdev, 1);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
+diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
+index ec49dad..de8bbbc 100644
+--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
++++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
+@@ -576,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+ ring_size = num_loops * dwords_per_loop;
+ /* set default + shaders */
+ ring_size += 40; /* shaders + def state */
+- ring_size += 12; /* fence emit for VB IB */
++ ring_size += 10; /* fence emit for VB IB */
+ ring_size += 5; /* done copy */
+- ring_size += 12; /* fence emit for done copy */
++ ring_size += 10; /* fence emit for done copy */
+ r = radeon_ring_lock(rdev, ring_size);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
+index 6d5a711..75bcf35 100644
+--- a/drivers/gpu/drm/radeon/r600_cp.c
++++ b/drivers/gpu/drm/radeon/r600_cp.c
+@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
+
+ gb_tiling_config |= R600_BANK_SWAPS(1);
+
+- backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+- dev_priv->r600_max_backends,
+- (0xff << dev_priv->r600_max_backends) & 0xff);
++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
++ backend_map = 0x28;
++ else
++ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
++ dev_priv->r600_max_backends,
++ (0xff << dev_priv->r600_max_backends) & 0xff);
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+
+ cc_gc_shader_pipe_config =
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 05ee1ae..afb3ddb 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
+ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+
+ /*
+- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
++ * r100,rv100,rs100,rv200,rs200
+ */
+ extern int r100_init(struct radeon_device *rdev);
+ extern void r100_fini(struct radeon_device *rdev);
+@@ -120,6 +120,51 @@ static struct radeon_asic r100_asic = {
+ .ioctl_wait_idle = NULL,
+ };
+
++/*
++ * r200,rv250,rs300,rv280
++ */
++extern int r200_copy_dma(struct radeon_device *rdev,
++ uint64_t src_offset,
++ uint64_t dst_offset,
++ unsigned num_pages,
++ struct radeon_fence *fence);
++static struct radeon_asic r200_asic = {
++ .init = &r100_init,
++ .fini = &r100_fini,
++ .suspend = &r100_suspend,
++ .resume = &r100_resume,
++ .vga_set_state = &r100_vga_set_state,
++ .gpu_reset = &r100_gpu_reset,
++ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
++ .gart_set_page = &r100_pci_gart_set_page,
++ .cp_commit = &r100_cp_commit,
++ .ring_start = &r100_ring_start,
++ .ring_test = &r100_ring_test,
++ .ring_ib_execute = &r100_ring_ib_execute,
++ .irq_set = &r100_irq_set,
++ .irq_process = &r100_irq_process,
++ .get_vblank_counter = &r100_get_vblank_counter,
++ .fence_ring_emit = &r100_fence_ring_emit,
++ .cs_parse = &r100_cs_parse,
++ .copy_blit = &r100_copy_blit,
++ .copy_dma = &r200_copy_dma,
++ .copy = &r100_copy_blit,
++ .get_engine_clock = &radeon_legacy_get_engine_clock,
++ .set_engine_clock = &radeon_legacy_set_engine_clock,
++ .get_memory_clock = &radeon_legacy_get_memory_clock,
++ .set_memory_clock = NULL,
++ .set_pcie_lanes = NULL,
++ .set_clock_gating = &radeon_legacy_set_clock_gating,
++ .set_surface_reg = r100_set_surface_reg,
++ .clear_surface_reg = r100_clear_surface_reg,
++ .bandwidth_update = &r100_bandwidth_update,
++ .hpd_init = &r100_hpd_init,
++ .hpd_fini = &r100_hpd_fini,
++ .hpd_sense = &r100_hpd_sense,
++ .hpd_set_polarity = &r100_hpd_set_polarity,
++ .ioctl_wait_idle = NULL,
++};
++
+
+ /*
+ * r300,r350,rv350,rv380
+@@ -138,11 +183,6 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
+ extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+ extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+-extern int r300_copy_dma(struct radeon_device *rdev,
+- uint64_t src_offset,
+- uint64_t dst_offset,
+- unsigned num_pages,
+- struct radeon_fence *fence);
+ static struct radeon_asic r300_asic = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+@@ -162,7 +202,45 @@ static struct radeon_asic r300_asic = {
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+- .copy_dma = &r300_copy_dma,
++ .copy_dma = &r200_copy_dma,
++ .copy = &r100_copy_blit,
++ .get_engine_clock = &radeon_legacy_get_engine_clock,
++ .set_engine_clock = &radeon_legacy_set_engine_clock,
++ .get_memory_clock = &radeon_legacy_get_memory_clock,
++ .set_memory_clock = NULL,
++ .set_pcie_lanes = &rv370_set_pcie_lanes,
++ .set_clock_gating = &radeon_legacy_set_clock_gating,
++ .set_surface_reg = r100_set_surface_reg,
++ .clear_surface_reg = r100_clear_surface_reg,
++ .bandwidth_update = &r100_bandwidth_update,
++ .hpd_init = &r100_hpd_init,
++ .hpd_fini = &r100_hpd_fini,
++ .hpd_sense = &r100_hpd_sense,
++ .hpd_set_polarity = &r100_hpd_set_polarity,
++ .ioctl_wait_idle = NULL,
++};
++
++
++static struct radeon_asic r300_asic_pcie = {
++ .init = &r300_init,
++ .fini = &r300_fini,
++ .suspend = &r300_suspend,
++ .resume = &r300_resume,
++ .vga_set_state = &r100_vga_set_state,
++ .gpu_reset = &r300_gpu_reset,
++ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
++ .gart_set_page = &rv370_pcie_gart_set_page,
++ .cp_commit = &r100_cp_commit,
++ .ring_start = &r300_ring_start,
++ .ring_test = &r100_ring_test,
++ .ring_ib_execute = &r100_ring_ib_execute,
++ .irq_set = &r100_irq_set,
++ .irq_process = &r100_irq_process,
++ .get_vblank_counter = &r100_get_vblank_counter,
++ .fence_ring_emit = &r300_fence_ring_emit,
++ .cs_parse = &r300_cs_parse,
++ .copy_blit = &r100_copy_blit,
++ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+@@ -206,7 +284,7 @@ static struct radeon_asic r420_asic = {
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+- .copy_dma = &r300_copy_dma,
++ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+@@ -255,7 +333,7 @@ static struct radeon_asic rs400_asic = {
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+- .copy_dma = &r300_copy_dma,
++ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+@@ -314,7 +392,7 @@ static struct radeon_asic rs600_asic = {
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+- .copy_dma = &r300_copy_dma,
++ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+@@ -322,6 +400,8 @@ static struct radeon_asic rs600_asic = {
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
++ .set_surface_reg = r100_set_surface_reg,
++ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rs600_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+@@ -360,8 +440,8 @@ static struct radeon_asic rs690_asic = {
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+- .copy_dma = &r300_copy_dma,
+- .copy = &r300_copy_dma,
++ .copy_dma = &r200_copy_dma,
++ .copy = &r200_copy_dma,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+@@ -412,7 +492,7 @@ static struct radeon_asic rv515_asic = {
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+- .copy_dma = &r300_copy_dma,
++ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+@@ -455,7 +535,7 @@ static struct radeon_asic r520_asic = {
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+- .copy_dma = &r300_copy_dma,
++ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 2dcda61..4d88315 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
++ /* Asrock RS600 board lists the DVI port as HDMI */
++ if ((dev->pdev->device == 0x7941) &&
++ (dev->pdev->subsystem_vendor == 0x1849) &&
++ (dev->pdev->subsystem_device == 0x7941)) {
++ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
++ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
++ *connector_type = DRM_MODE_CONNECTOR_DVID;
++ }
++
+ /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
+ if ((dev->pdev->device == 0x7941) &&
+ (dev->pdev->subsystem_vendor == 0x147b) &&
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 2381885..65f8194 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -780,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
+ * connected and the DVI port disconnected. If the edid doesn't
+ * say HDMI, vice versa.
+ */
+- if (radeon_connector->shared_ddc && connector_status_connected) {
++ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+ struct drm_device *dev = connector->dev;
+ struct drm_connector *list_connector;
+ struct radeon_connector *list_radeon_connector;
+@@ -1060,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ return;
+ }
+ if (radeon_connector->ddc_bus && i2c_bus->valid) {
+- if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
+- sizeof(struct radeon_i2c_bus_rec)) == 0) {
++ if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
+ radeon_connector->shared_ddc = true;
+ shared_ddc = true;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 768b150..767aed8 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -329,21 +329,22 @@ int radeon_asic_init(struct radeon_device *rdev)
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
++ rdev->asic = &r100_asic;
++ break;
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+- rdev->asic = &r100_asic;
++ rdev->asic = &r200_asic;
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+- rdev->asic = &r300_asic;
+- if (rdev->flags & RADEON_IS_PCIE) {
+- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+- }
++ if (rdev->flags & RADEON_IS_PCIE)
++ rdev->asic = &r300_asic_pcie;
++ else
++ rdev->asic = &r300_asic;
+ break;
+ case CHIP_R420:
+ case CHIP_R423:
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
+index e137852..c57ad60 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.h
++++ b/drivers/gpu/drm/radeon/radeon_drv.h
+@@ -106,9 +106,10 @@
+ * 1.29- R500 3D cmd buffer support
+ * 1.30- Add support for occlusion queries
+ * 1.31- Add support for num Z pipes from GET_PARAM
++ * 1.32- fixes for rv740 setup
+ */
+ #define DRIVER_MAJOR 1
+-#define DRIVER_MINOR 31
++#define DRIVER_MINOR 32
+ #define DRIVER_PATCHLEVEL 0
+
+ enum radeon_cp_microcode_version {
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 694799f..6579eb4 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -100,6 +100,8 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+ if (tmp == NULL) {
+ return;
+ }
++ if (!tmp->fence->emited)
++ radeon_fence_unref(&tmp->fence);
+ mutex_lock(&rdev->ib_pool.mutex);
+ tmp->free = true;
+ mutex_unlock(&rdev->ib_pool.mutex);
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 5943d56..0302167 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
+
+ gb_tiling_config |= BANK_SWAPS(1);
+
+- backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+- rdev->config.rv770.max_backends,
+- (0xff << rdev->config.rv770.max_backends) & 0xff);
++ if (rdev->family == CHIP_RV740)
++ backend_map = 0x28;
++ else
++ backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
++ rdev->config.rv770.max_backends,
++ (0xff << rdev->config.rv770.max_backends) & 0xff);
+ gb_tiling_config |= BACKEND_MAP(backend_map);
+
+ cc_gc_shader_pipe_config =
+diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
+index e2123af..a759170 100644
+--- a/drivers/gpu/drm/ttm/ttm_tt.c
++++ b/drivers/gpu/drm/ttm/ttm_tt.c
+@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
+
+ #ifdef CONFIG_X86
+ static inline int ttm_tt_set_page_caching(struct page *p,
+- enum ttm_caching_state c_state)
++ enum ttm_caching_state c_old,
++ enum ttm_caching_state c_new)
+ {
+ int ret = 0;
+
+ if (PageHighMem(p))
+ return 0;
+
+- if (get_page_memtype(p) != -1) {
++ if (c_old != tt_cached) {
+ /* p isn't in the default caching state, set it to
+ * writeback first to free its current memtype. */
+
+@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
+ return ret;
+ }
+
+- if (c_state == tt_wc)
++ if (c_new == tt_wc)
+ ret = set_memory_wc((unsigned long) page_address(p), 1);
+- else if (c_state == tt_uncached)
++ else if (c_new == tt_uncached)
+ ret = set_pages_uc(p, 1);
+
+ return ret;
+ }
+ #else /* CONFIG_X86 */
+ static inline int ttm_tt_set_page_caching(struct page *p,
+- enum ttm_caching_state c_state)
++ enum ttm_caching_state c_old,
++ enum ttm_caching_state c_new)
+ {
+ return 0;
+ }
+@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages[i];
+ if (likely(cur_page != NULL)) {
+- ret = ttm_tt_set_page_caching(cur_page, c_state);
++ ret = ttm_tt_set_page_caching(cur_page,
++ ttm->caching_state,
++ c_state);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+@@ -268,7 +272,7 @@ out_err:
+ for (j = 0; j < i; ++j) {
+ cur_page = ttm->pages[j];
+ if (likely(cur_page != NULL)) {
+- (void)ttm_tt_set_page_caching(cur_page,
++ (void)ttm_tt_set_page_caching(cur_page, c_state,
+ ttm->caching_state);
+ }
+ }
+@@ -476,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
+ void *from_virtual;
+ void *to_virtual;
+ int i;
+- int ret;
++ int ret = -ENOMEM;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
+@@ -495,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = read_mapping_page(swap_space, i, NULL);
+- if (IS_ERR(from_page))
++ if (IS_ERR(from_page)) {
++ ret = PTR_ERR(from_page);
+ goto out_err;
++ }
+ to_page = __ttm_tt_get_page(ttm, i);
+ if (unlikely(to_page == NULL))
+ goto out_err;
+@@ -519,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
+ return 0;
+ out_err:
+ ttm_tt_free_alloced_pages(ttm);
+- return -ENOMEM;
++ return ret;
+ }
+
+ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+@@ -531,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+ void *from_virtual;
+ void *to_virtual;
+ int i;
++ int ret = -ENOMEM;
+
+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
+ BUG_ON(ttm->caching_state != tt_cached);
+@@ -553,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+ 0);
+ if (unlikely(IS_ERR(swap_storage))) {
+ printk(KERN_ERR "Failed allocating swap storage.\n");
+- return -ENOMEM;
++ return PTR_ERR(swap_storage);
+ }
+ } else
+ swap_storage = persistant_swap_storage;
+@@ -565,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+ if (unlikely(from_page == NULL))
+ continue;
+ to_page = read_mapping_page(swap_space, i, NULL);
+- if (unlikely(to_page == NULL))
++ if (unlikely(IS_ERR(to_page))) {
++ ret = PTR_ERR(to_page);
+ goto out_err;
+-
++ }
+ preempt_disable();
+ from_virtual = kmap_atomic(from_page, KM_USER0);
+ to_virtual = kmap_atomic(to_page, KM_USER1);
+@@ -591,5 +599,5 @@ out_err:
+ if (!persistant_swap_storage)
+ fput(swap_storage);
+
+- return -ENOMEM;
++ return ret;
+ }
diff --git a/freed-ora/current/F-12/drm-upgrayedd.patch b/freed-ora/current/F-12/drm-upgrayedd.patch
index bcd6fbc32..b518d05cb 100644
--- a/freed-ora/current/F-12/drm-upgrayedd.patch
+++ b/freed-ora/current/F-12/drm-upgrayedd.patch
@@ -11507,8 +11507,8 @@ index 0e0e4b4..b1d0acb 100644
};
@@ -622,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
- if (IS_I8XX(dev))
- return connector_status_connected;
+ {
+ enum drm_connector_status status = connector_status_connected;
- if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
diff --git a/freed-ora/current/F-12/ext4-fix-freeze-deadlock-under-io.patch b/freed-ora/current/F-12/ext4-fix-freeze-deadlock-under-io.patch
new file mode 100644
index 000000000..f50dee118
--- /dev/null
+++ b/freed-ora/current/F-12/ext4-fix-freeze-deadlock-under-io.patch
@@ -0,0 +1,49 @@
+From: Eric Sandeen <sandeen@sandeen.net>
+Date: Sun, 1 Aug 2010 21:33:29 +0000 (-0400)
+Subject: ext4: fix freeze deadlock under IO
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=437f88cc031ffe7f37f3e705367f4fe1f4be8b0f
+
+ext4: fix freeze deadlock under IO
+
+Commit 6b0310fbf087ad6 caused a regression resulting in deadlocks
+when freezing a filesystem which had active IO; the vfs_check_frozen
+level (SB_FREEZE_WRITE) did not let the freeze-related IO syncing
+through. Duh.
+
+Changing the test to FREEZE_TRANS should let the normal freeze
+syncing get through the fs, but still block any transactions from
+starting once the fs is completely frozen.
+
+I tested this by running fsstress in the background while periodically
+snapshotting the fs and running fsck on the result. I ran into
+occasional deadlocks, but different ones. I think this is a
+fine fix for the problem at hand, and the other deadlocky things
+will need more investigation.
+
+Reported-by: Phillip Susi <psusi@cfl.rr.com>
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+---
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index e046eba..282a270 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -241,7 +241,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
+ if (sb->s_flags & MS_RDONLY)
+ return ERR_PTR(-EROFS);
+
+- vfs_check_frozen(sb, SB_FREEZE_WRITE);
++ vfs_check_frozen(sb, SB_FREEZE_TRANS);
+ /* Special case here: if the journal has aborted behind our
+ * backs (eg. EIO in the commit thread), then we still need to
+ * take the FS itself readonly cleanly. */
+@@ -3608,7 +3608,7 @@ int ext4_force_commit(struct super_block *sb)
+
+ journal = EXT4_SB(sb)->s_journal;
+ if (journal) {
+- vfs_check_frozen(sb, SB_FREEZE_WRITE);
++ vfs_check_frozen(sb, SB_FREEZE_TRANS);
+ ret = ext4_journal_force_commit(journal);
+ }
+
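The deadlock described above only bites while a freeze races against in-flight writes, so the usual way to poke at this path is to freeze and thaw the filesystem from user space while fsstress runs. A minimal sketch, assuming a scratch ext4 mount at /mnt/scratch (the path is an assumption, not part of the patch) and CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIFREEZE, FITHAW */

int main(void)
{
	/* Assumed scratch mount point; requires CAP_SYS_ADMIN. */
	int fd = open("/mnt/scratch", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FIFREEZE, 0) < 0) {	/* new writes block until thaw */
		perror("FIFREEZE");
		close(fd);
		return 1;
	}
	printf("frozen; take the snapshot, then thaw\n");
	sleep(5);
	if (ioctl(fd, FITHAW, 0) < 0) {
		perror("FITHAW");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
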
diff --git a/freed-ora/current/F-12/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch b/freed-ora/current/F-12/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch
new file mode 100644
index 000000000..14407a5a9
--- /dev/null
+++ b/freed-ora/current/F-12/ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch
@@ -0,0 +1,34 @@
+From 1f5a81e41f8b1a782c68d3843e9ec1bfaadf7d72 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Wed, 2 Jun 2010 22:04:39 -0400
+Subject: ext4: Make sure the MOVE_EXT ioctl can't overwrite append-only files
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 1f5a81e41f8b1a782c68d3843e9ec1bfaadf7d72 upstream.
+
+Dan Rosenberg has reported a problem with the MOVE_EXT ioctl. If the
+donor file is an append-only file, we should not allow the operation
+to proceed, lest we end up overwriting the contents of an append-only
+file.
+
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Cc: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/move_extent.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -959,6 +959,9 @@ mext_check_arguments(struct inode *orig_
+ return -EINVAL;
+ }
+
++ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
++ return -EPERM;
++
+ /* Ext4 move extent does not support swapfile */
+ if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
+ ext4_debug("ext4 move extent: The argument files should "
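
From user space, the same append-only/immutable attributes that the new IS_IMMUTABLE()/IS_APPEND() check inspects are visible through the FS_IOC_GETFLAGS ioctl (the chattr +a / +i flags). A small sketch that checks a would-be donor file before issuing MOVE_EXT:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_GETFLAGS, FS_APPEND_FL, FS_IMMUTABLE_FL */

int main(int argc, char **argv)
{
	long flags = 0;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <donor-file>\n", argv[0]);
		return 2;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}
	if (flags & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		printf("%s is append-only or immutable; the patched MOVE_EXT returns -EPERM\n", argv[1]);
	else
		printf("%s carries neither flag\n", argv[1]);
	close(fd);
	return 0;
}
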
diff --git a/freed-ora/current/F-12/find-provides b/freed-ora/current/F-12/find-provides
new file mode 100755
index 000000000..b28d1028f
--- /dev/null
+++ b/freed-ora/current/F-12/find-provides
@@ -0,0 +1,44 @@
+#!/usr/bin/python
+#
+# find-provides: munge the provides dependencies from the kabideps file
+#
+# This software may be freely redistributed under the terms of the GNU
+# General Public License (GPL).
+#
+# Takes a directory prefix, then outputs the kabideps file contents.
+
+__author__ = "Jon Masters <jcm@redhat.com>"
+__version__ = "1.0"
+__date__ = "Tue 25 Jul 2006 04:00 GMT"
+__copyright__ = "Copyright (C) 2006 Red Hat, Inc"
+__license__ = "GPL"
+
+import os
+import re
+import string
+import sys
+
+false = 0
+true = 1
+
+kabideps=""
+
+p = re.compile('^(.*)/symvers-(.*).gz$')
+while true:
+ foo = sys.stdin.readline()
+ if foo == "":
+ break
+ string.split(foo)
+ m = p.match(foo)
+ if m:
+ kabideps=sys.argv[1] + "/kernel-" + m.group(2) + "-kabideps"
+
+if kabideps == "":
+ sys.exit(0)
+
+if not (os.path.isfile(kabideps)):
+ sys.stderr.write(sys.argv[0] + ": cannot locate kabideps file: " + kabideps + "\n")
+ sys.exit(1)
+
+sys.stderr.write(sys.argv[0] + ": processing kABI: " + kabideps)
+os.system("cat " + kabideps)
diff --git a/freed-ora/current/F-12/fix-abrtd.patch b/freed-ora/current/F-12/fix-abrtd.patch
new file mode 100644
index 000000000..4a8db58f1
--- /dev/null
+++ b/freed-ora/current/F-12/fix-abrtd.patch
@@ -0,0 +1,774 @@
+diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
+index b639dcf..346b694 100644
+--- a/fs/binfmt_aout.c
++++ b/fs/binfmt_aout.c
+@@ -32,7 +32,7 @@
+
+ static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
+ static int load_aout_library(struct file*);
+-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
++static int aout_core_dump(struct coredump_params *cprm);
+
+ static struct linux_binfmt aout_format = {
+ .module = THIS_MODULE,
+@@ -89,8 +89,9 @@ if (file->f_op->llseek) { \
+ * dumping of the process results in another error..
+ */
+
+-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
++static int aout_core_dump(struct coredump_params *cprm)
+ {
++ struct file *file = cprm->file;
+ mm_segment_t fs;
+ int has_dumped = 0;
+ unsigned long dump_start, dump_size;
+@@ -108,16 +109,16 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
+ current->flags |= PF_DUMPCORE;
+ strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
+ dump.u_ar0 = offsetof(struct user, regs);
+- dump.signal = signr;
+- aout_dump_thread(regs, &dump);
++ dump.signal = cprm->signr;
++ aout_dump_thread(cprm->regs, &dump);
+
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
+- if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
++ if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
+ dump.u_dsize = 0;
+
+ /* Make sure we have enough room to write the stack and data areas. */
+- if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
++ if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
+ dump.u_ssize = 0;
+
+ /* make sure we actually have a data and stack area to dump */
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index b9b3bb5..4ee5bb2 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -45,7 +45,7 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
+ * don't even try.
+ */
+ #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
++static int elf_core_dump(struct coredump_params *cprm);
+ #else
+ #define elf_core_dump NULL
+ #endif
+@@ -1277,8 +1277,9 @@ static int writenote(struct memelfnote *men, struct file *file,
+ }
+ #undef DUMP_WRITE
+
+-#define DUMP_WRITE(addr, nr) \
+- if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
++#define DUMP_WRITE(addr, nr) \
++ if ((size += (nr)) > cprm->limit || \
++ !dump_write(cprm->file, (addr), (nr))) \
+ goto end_coredump;
+
+ static void fill_elf_header(struct elfhdr *elf, int segs,
+@@ -1906,7 +1907,7 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
+ * and then they are actually written out. If we run out of core limit
+ * we just truncate.
+ */
+-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
++static int elf_core_dump(struct coredump_params *cprm)
+ {
+ int has_dumped = 0;
+ mm_segment_t fs;
+@@ -1952,7 +1953,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+ * notes. This also sets up the file header.
+ */
+ if (!fill_note_info(elf, segs + 1, /* including notes section */
+- &info, signr, regs))
++ &info, cprm->signr, cprm->regs))
+ goto cleanup;
+
+ has_dumped = 1;
+@@ -2014,14 +2015,14 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+ #endif
+
+ /* write out the notes section */
+- if (!write_note_info(&info, file, &foffset))
++ if (!write_note_info(&info, cprm->file, &foffset))
+ goto end_coredump;
+
+- if (elf_coredump_extra_notes_write(file, &foffset))
++ if (elf_coredump_extra_notes_write(cprm->file, &foffset))
+ goto end_coredump;
+
+ /* Align to page */
+- if (!dump_seek(file, dataoff - foffset))
++ if (!dump_seek(cprm->file, dataoff - foffset))
+ goto end_coredump;
+
+ for (vma = first_vma(current, gate_vma); vma != NULL;
+@@ -2038,12 +2039,13 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+ page = get_dump_page(addr);
+ if (page) {
+ void *kaddr = kmap(page);
+- stop = ((size += PAGE_SIZE) > limit) ||
+- !dump_write(file, kaddr, PAGE_SIZE);
++ stop = ((size += PAGE_SIZE) > cprm->limit) ||
++ !dump_write(cprm->file, kaddr,
++ PAGE_SIZE);
+ kunmap(page);
+ page_cache_release(page);
+ } else
+- stop = !dump_seek(file, PAGE_SIZE);
++ stop = !dump_seek(cprm->file, PAGE_SIZE);
+ if (stop)
+ goto end_coredump;
+ }
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 38502c6..917e1b4 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -76,7 +76,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *,
+ struct file *, struct mm_struct *);
+
+ #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+-static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit);
++static int elf_fdpic_core_dump(struct coredump_params *cprm);
+ #endif
+
+ static struct linux_binfmt elf_fdpic_format = {
+@@ -1325,8 +1325,9 @@ static int writenote(struct memelfnote *men, struct file *file)
+ #undef DUMP_WRITE
+ #undef DUMP_SEEK
+
+-#define DUMP_WRITE(addr, nr) \
+- if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
++#define DUMP_WRITE(addr, nr) \
++ if ((size += (nr)) > cprm->limit || \
++ !dump_write(cprm->file, (addr), (nr))) \
+ goto end_coredump;
+
+ static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
+@@ -1581,8 +1582,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
+ * and then they are actually written out. If we run out of core limit
+ * we just truncate.
+ */
+-static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
+- struct file *file, unsigned long limit)
++static int elf_fdpic_core_dump(struct coredump_params *cprm)
+ {
+ #define NUM_NOTES 6
+ int has_dumped = 0;
+@@ -1641,7 +1641,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
+ goto cleanup;
+ #endif
+
+- if (signr) {
++ if (cprm->signr) {
+ struct core_thread *ct;
+ struct elf_thread_status *tmp;
+
+@@ -1660,14 +1660,14 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
+ int sz;
+
+ tmp = list_entry(t, struct elf_thread_status, list);
+- sz = elf_dump_thread_status(signr, tmp);
++ sz = elf_dump_thread_status(cprm->signr, tmp);
+ thread_status_size += sz;
+ }
+ }
+
+ /* now collect the dump for the current */
+- fill_prstatus(prstatus, current, signr);
+- elf_core_copy_regs(&prstatus->pr_reg, regs);
++ fill_prstatus(prstatus, current, cprm->signr);
++ elf_core_copy_regs(&prstatus->pr_reg, cprm->regs);
+
+ segs = current->mm->map_count;
+ #ifdef ELF_CORE_EXTRA_PHDRS
+@@ -1702,7 +1702,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
+
+ /* Try to dump the FPU. */
+ if ((prstatus->pr_fpvalid =
+- elf_core_copy_task_fpregs(current, regs, fpu)))
++ elf_core_copy_task_fpregs(current, cprm->regs, fpu)))
+ fill_note(notes + numnote++,
+ "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
+ #ifdef ELF_CORE_COPY_XFPREGS
+@@ -1773,7 +1773,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
+
+ /* write out the notes section */
+ for (i = 0; i < numnote; i++)
+- if (!writenote(notes + i, file))
++ if (!writenote(notes + i, cprm->file))
+ goto end_coredump;
+
+ /* write out the thread status notes section */
+@@ -1782,14 +1782,15 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
+ list_entry(t, struct elf_thread_status, list);
+
+ for (i = 0; i < tmp->num_notes; i++)
+- if (!writenote(&tmp->notes[i], file))
++ if (!writenote(&tmp->notes[i], cprm->file))
+ goto end_coredump;
+ }
+
+- if (!dump_seek(file, dataoff))
++ if (!dump_seek(cprm->file, dataoff))
+ goto end_coredump;
+
+- if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
++ if (elf_fdpic_dump_segments(cprm->file, &size, &cprm->limit,
++ mm_flags) < 0)
+ goto end_coredump;
+
+ #ifdef ELF_CORE_WRITE_EXTRA_DATA
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index a279665..d4a00ea 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -87,7 +87,7 @@ static int load_flat_shared_library(int id, struct lib_info *p);
+ #endif
+
+ static int load_flat_binary(struct linux_binprm *, struct pt_regs * regs);
+-static int flat_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
++static int flat_core_dump(struct coredump_params *cprm);
+
+ static struct linux_binfmt flat_format = {
+ .module = THIS_MODULE,
+@@ -102,10 +102,10 @@ static struct linux_binfmt flat_format = {
+ * Currently only a stub-function.
+ */
+
+-static int flat_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
++static int flat_core_dump(struct coredump_params *cprm)
+ {
+ printk("Process %s:%d received signr %d and should have core dumped\n",
+- current->comm, current->pid, (int) signr);
++ current->comm, current->pid, (int) cprm->signr);
+ return(1);
+ }
+
+diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
+index eff74b9..2a9b533 100644
+--- a/fs/binfmt_som.c
++++ b/fs/binfmt_som.c
+@@ -43,7 +43,7 @@ static int load_som_library(struct file *);
+ * don't even try.
+ */
+ #if 0
+-static int som_core_dump(long signr, struct pt_regs *regs, unsigned long limit);
++static int som_core_dump(struct coredump_params *cprm);
+ #else
+ #define som_core_dump NULL
+ #endif
+diff --git a/fs/exec.c b/fs/exec.c
+index ba112bd..08ec506 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1749,6 +1749,50 @@ static void wait_for_dump_helpers(struct file *file)
+ }
+
+
++/*
++ * umh_pipe_setup
++ * helper function to customize the process used
++ * to collect the core in userspace. Specifically
++ * it sets up a pipe and installs it as fd 0 (stdin)
++ * for the process. Returns 0 on success, or
++ * PTR_ERR on failure.
++ * Note that it also sets the core limit to 1. This
++ * is a special value that we use to trap recursive
++ * core dumps
++ */
++static int umh_pipe_setup(struct subprocess_info *info)
++{
++ struct file *rp, *wp;
++ struct fdtable *fdt;
++ struct coredump_params *cp = (struct coredump_params *)info->data;
++ struct files_struct *cf = current->files;
++
++ wp = create_write_pipe(0);
++ if (IS_ERR(wp))
++ return PTR_ERR(wp);
++
++ rp = create_read_pipe(wp, 0);
++ if (IS_ERR(rp)) {
++ free_write_pipe(wp);
++ return PTR_ERR(rp);
++ }
++
++ cp->file = wp;
++
++ sys_close(0);
++ fd_install(0, rp);
++ spin_lock(&cf->file_lock);
++ fdt = files_fdtable(cf);
++ FD_SET(0, fdt->open_fds);
++ FD_CLR(0, fdt->close_on_exec);
++ spin_unlock(&cf->file_lock);
++
++ /* and disallow core files too */
++ current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
++
++ return 0;
++}
++
+ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ {
+ struct core_state core_state;
+@@ -1756,17 +1800,20 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ struct mm_struct *mm = current->mm;
+ struct linux_binfmt * binfmt;
+ struct inode * inode;
+- struct file * file;
+ const struct cred *old_cred;
+ struct cred *cred;
+ int retval = 0;
+ int flag = 0;
+ int ispipe = 0;
+- unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
+ char **helper_argv = NULL;
+ int helper_argc = 0;
+ int dump_count = 0;
+ static atomic_t core_dump_count = ATOMIC_INIT(0);
++ struct coredump_params cprm = {
++ .signr = signr,
++ .regs = regs,
++ .limit = current->signal->rlim[RLIMIT_CORE].rlim_cur,
++ };
+
+ audit_core_dumps(signr);
+
+@@ -1822,19 +1869,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ ispipe = format_corename(corename, signr);
+ unlock_kernel();
+
+- if ((!ispipe) && (core_limit < binfmt->min_coredump))
++ if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
+ goto fail_unlock;
+
+ if (ispipe) {
+- if (core_limit == 0) {
++ if (cprm.limit == 1) {
+ /*
+ * Normally core limits are irrelevant to pipes, since
+ * we're not writing to the file system, but we use
+- * core_limit of 0 here as a speacial value. Any
+- * non-zero limit gets set to RLIM_INFINITY below, but
++ * cprm.limit of 1 here as a special value. Any
++ * non-1 limit gets set to RLIM_INFINITY below, but
+ * a limit of 0 skips the dump. This is a consistent
+ * way to catch recursive crashes. We can still crash
+- * if the core_pattern binary sets RLIM_CORE = !0
++ * if the core_pattern binary sets RLIM_CORE = !1
+ * but it runs as root, and can do lots of stupid things
+ * Note that we use task_tgid_vnr here to grab the pid
+ * of the process group leader. That way we get the
+@@ -1842,7 +1889,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ * core_pattern process dies.
+ */
+ printk(KERN_WARNING
+- "Process %d(%s) has RLIMIT_CORE set to 0\n",
++ "Process %d(%s) has RLIMIT_CORE set to 1\n",
+ task_tgid_vnr(current), current->comm);
+ printk(KERN_WARNING "Aborting core\n");
+ goto fail_unlock;
+@@ -1863,25 +1910,30 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ goto fail_dropcount;
+ }
+
+- core_limit = RLIM_INFINITY;
++ cprm.limit = RLIM_INFINITY;
+
+ /* SIGPIPE can happen, but it's just never processed */
+- if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
+- &file)) {
++ cprm.file = NULL;
++ if (call_usermodehelper_fns(helper_argv[0], helper_argv, NULL,
++ UMH_WAIT_EXEC, umh_pipe_setup,
++ NULL, &cprm)) {
++ if (cprm.file)
++ filp_close(cprm.file, NULL);
++
+ printk(KERN_INFO "Core dump to %s pipe failed\n",
+ corename);
+ goto fail_dropcount;
+ }
+ } else
+- file = filp_open(corename,
++ cprm.file = filp_open(corename,
+ O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+ 0600);
+- if (IS_ERR(file))
++ if (IS_ERR(cprm.file))
+ goto fail_dropcount;
+- inode = file->f_path.dentry->d_inode;
++ inode = cprm.file->f_path.dentry->d_inode;
+ if (inode->i_nlink > 1)
+ goto close_fail; /* multiple links - don't dump */
+- if (!ispipe && d_unhashed(file->f_path.dentry))
++ if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
+ goto close_fail;
+
+ /* AK: actually i see no reason to not allow this for named pipes etc.,
+@@ -1894,21 +1946,22 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ */
+ if (!ispipe && (inode->i_uid != current_fsuid()))
+ goto close_fail;
+- if (!file->f_op)
++ if (!cprm.file->f_op)
+ goto close_fail;
+- if (!file->f_op->write)
++ if (!cprm.file->f_op->write)
+ goto close_fail;
+- if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
++ if (!ispipe &&
++ do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
+ goto close_fail;
+
+- retval = binfmt->core_dump(signr, regs, file, core_limit);
++ retval = binfmt->core_dump(&cprm);
+
+ if (retval)
+ current->signal->group_exit_code |= 0x80;
+ close_fail:
+ if (ispipe && core_pipe_limit)
+- wait_for_dump_helpers(file);
+- filp_close(file, NULL);
++ wait_for_dump_helpers(cprm.file);
++ filp_close(cprm.file, NULL);
+ fail_dropcount:
+ if (dump_count)
+ atomic_dec(&core_dump_count);
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index aece486..cd4349b 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -68,6 +68,14 @@ struct linux_binprm{
+
+ #define BINPRM_MAX_RECURSION 4
+
++/* Function parameter for binfmt->coredump */
++struct coredump_params {
++ long signr;
++ struct pt_regs *regs;
++ struct file *file;
++ unsigned long limit;
++};
++
+ /*
+ * This structure defines the functions that are used to load the binary formats that
+ * linux accepts.
+@@ -77,7 +85,7 @@ struct linux_binfmt {
+ struct module *module;
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(struct file *);
+- int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
++ int (*core_dump)(struct coredump_params *cprm);
+ unsigned long min_coredump; /* minimal dump size */
+ int hasvdso;
+ };
+diff --git a/include/linux/kmod.h b/include/linux/kmod.h
+index 384ca8b..ec69956 100644
+--- a/include/linux/kmod.h
++++ b/include/linux/kmod.h
+@@ -23,6 +23,7 @@
+ #include <linux/stddef.h>
+ #include <linux/errno.h>
+ #include <linux/compiler.h>
++#include <linux/workqueue.h>
+
+ #define KMOD_PATH_LEN 256
+
+@@ -44,7 +45,26 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
+
+ struct key;
+ struct file;
+-struct subprocess_info;
++
++enum umh_wait {
++ UMH_NO_WAIT = -1, /* don't wait at all */
++ UMH_WAIT_EXEC = 0, /* wait for the exec, but not the process */
++ UMH_WAIT_PROC = 1, /* wait for the process to complete */
++};
++
++struct subprocess_info {
++ struct work_struct work;
++ struct completion *complete;
++ struct cred *cred;
++ char *path;
++ char **argv;
++ char **envp;
++ enum umh_wait wait;
++ int retval;
++ int (*init)(struct subprocess_info *info);
++ void (*cleanup)(struct subprocess_info *info);
++ void *data;
++};
+
+ /* Allocate a subprocess_info structure */
+ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
+@@ -55,14 +75,10 @@ void call_usermodehelper_setkeys(struct subprocess_info *info,
+ struct key *session_keyring);
+ int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+ struct file **filp);
+-void call_usermodehelper_setcleanup(struct subprocess_info *info,
+- void (*cleanup)(char **argv, char **envp));
+-
+-enum umh_wait {
+- UMH_NO_WAIT = -1, /* don't wait at all */
+- UMH_WAIT_EXEC = 0, /* wait for the exec, but not the process */
+- UMH_WAIT_PROC = 1, /* wait for the process to complete */
+-};
++void call_usermodehelper_setfns(struct subprocess_info *info,
++ int (*init)(struct subprocess_info *info),
++ void (*cleanup)(struct subprocess_info *info),
++ void *data);
+
+ /* Actually execute the sub-process */
+ int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
+@@ -72,7 +88,10 @@ int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
+ void call_usermodehelper_freeinfo(struct subprocess_info *info);
+
+ static inline int
+-call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
++call_usermodehelper_fns(char *path, char **argv, char **envp,
++ enum umh_wait wait,
++ int (*init)(struct subprocess_info *info),
++ void (*cleanup)(struct subprocess_info *), void *data)
+ {
+ struct subprocess_info *info;
+ gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+@@ -80,10 +99,18 @@ call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
+ info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+ if (info == NULL)
+ return -ENOMEM;
++ call_usermodehelper_setfns(info, init, cleanup, data);
+ return call_usermodehelper_exec(info, wait);
+ }
+
+ static inline int
++call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
++{
++ return call_usermodehelper_fns(path, argv, envp,
++ wait, NULL, NULL, NULL);
++}
++
++static inline int
+ call_usermodehelper_keys(char *path, char **argv, char **envp,
+ struct key *session_keyring, enum umh_wait wait)
+ {
+@@ -100,10 +127,6 @@ call_usermodehelper_keys(char *path, char **argv, char **envp,
+
+ extern void usermodehelper_init(void);
+
+-struct file;
+-extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
+- struct file **filp);
+-
+ extern int usermodehelper_disable(void);
+ extern void usermodehelper_enable(void);
+
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index 9fcb53a..7281229 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -124,19 +124,6 @@ int __request_module(bool wait, const char *fmt, ...)
+ EXPORT_SYMBOL(__request_module);
+ #endif /* CONFIG_MODULES */
+
+-struct subprocess_info {
+- struct work_struct work;
+- struct completion *complete;
+- struct cred *cred;
+- char *path;
+- char **argv;
+- char **envp;
+- enum umh_wait wait;
+- int retval;
+- struct file *stdin;
+- void (*cleanup)(char **argv, char **envp);
+-};
+-
+ /*
+ * This is the task which runs the usermode application
+ */
+@@ -158,26 +145,15 @@ static int ____call_usermodehelper(void *data)
+ commit_creds(sub_info->cred);
+ sub_info->cred = NULL;
+
+- /* Install input pipe when needed */
+- if (sub_info->stdin) {
+- struct files_struct *f = current->files;
+- struct fdtable *fdt;
+- /* no races because files should be private here */
+- sys_close(0);
+- fd_install(0, sub_info->stdin);
+- spin_lock(&f->file_lock);
+- fdt = files_fdtable(f);
+- FD_SET(0, fdt->open_fds);
+- FD_CLR(0, fdt->close_on_exec);
+- spin_unlock(&f->file_lock);
+-
+- /* and disallow core files too */
+- current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
+- }
+-
+ /* We can run anywhere, unlike our parent keventd(). */
+ set_cpus_allowed_ptr(current, cpu_all_mask);
+
++ if (sub_info->init) {
++ retval = sub_info->init(sub_info);
++ if (retval)
++ goto fail;
++ }
++
+ /*
+ * Our parent is keventd, which runs with elevated scheduling priority.
+ * Avoid propagating that into the userspace child.
+@@ -187,6 +163,7 @@ static int ____call_usermodehelper(void *data)
+ retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
+
+ /* Exec failed? */
++fail:
+ sub_info->retval = retval;
+ do_exit(0);
+ }
+@@ -194,7 +171,7 @@ static int ____call_usermodehelper(void *data)
+ void call_usermodehelper_freeinfo(struct subprocess_info *info)
+ {
+ if (info->cleanup)
+- (*info->cleanup)(info->argv, info->envp);
++ (*info->cleanup)(info);
+ if (info->cred)
+ put_cred(info->cred);
+ kfree(info);
+@@ -406,50 +383,31 @@ void call_usermodehelper_setkeys(struct subprocess_info *info,
+ EXPORT_SYMBOL(call_usermodehelper_setkeys);
+
+ /**
+- * call_usermodehelper_setcleanup - set a cleanup function
++ * call_usermodehelper_setfns - set a cleanup/init function
+ * @info: a subprocess_info returned by call_usermodehelper_setup
+ * @cleanup: a cleanup function
++ * @init: an init function
++ * @data: arbitrary context sensitive data
++ *
++ * The init function is used to customize the helper process prior to
++ * exec. A non-zero return code causes the process to error out, exit,
++ * and return the failure to the calling process
+ *
+- * The cleanup function is just befor ethe subprocess_info is about to
++ * The cleanup function is just before the subprocess_info is about to
+ * be freed. This can be used for freeing the argv and envp. The
+ * Function must be runnable in either a process context or the
+ * context in which call_usermodehelper_exec is called.
+ */
+-void call_usermodehelper_setcleanup(struct subprocess_info *info,
+- void (*cleanup)(char **argv, char **envp))
++void call_usermodehelper_setfns(struct subprocess_info *info,
++ int (*init)(struct subprocess_info *info),
++ void (*cleanup)(struct subprocess_info *info),
++ void *data)
+ {
+ info->cleanup = cleanup;
++ info->init = init;
++ info->data = data;
+ }
+-EXPORT_SYMBOL(call_usermodehelper_setcleanup);
+-
+-/**
+- * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin
+- * @sub_info: a subprocess_info returned by call_usermodehelper_setup
+- * @filp: set to the write-end of a pipe
+- *
+- * This constructs a pipe, and sets the read end to be the stdin of the
+- * subprocess, and returns the write-end in *@filp.
+- */
+-int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+- struct file **filp)
+-{
+- struct file *f;
+-
+- f = create_write_pipe(0);
+- if (IS_ERR(f))
+- return PTR_ERR(f);
+- *filp = f;
+-
+- f = create_read_pipe(f, 0);
+- if (IS_ERR(f)) {
+- free_write_pipe(*filp);
+- return PTR_ERR(f);
+- }
+- sub_info->stdin = f;
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(call_usermodehelper_stdinpipe);
++EXPORT_SYMBOL(call_usermodehelper_setfns);
+
+ /**
+ * call_usermodehelper_exec - start a usermode application
+@@ -498,39 +456,6 @@ unlock:
+ }
+ EXPORT_SYMBOL(call_usermodehelper_exec);
+
+-/**
+- * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin
+- * @path: path to usermode executable
+- * @argv: arg vector for process
+- * @envp: environment for process
+- * @filp: set to the write-end of a pipe
+- *
+- * This is a simple wrapper which executes a usermode-helper function
+- * with a pipe as stdin. It is implemented entirely in terms of
+- * lower-level call_usermodehelper_* functions.
+- */
+-int call_usermodehelper_pipe(char *path, char **argv, char **envp,
+- struct file **filp)
+-{
+- struct subprocess_info *sub_info;
+- int ret;
+-
+- sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
+- if (sub_info == NULL)
+- return -ENOMEM;
+-
+- ret = call_usermodehelper_stdinpipe(sub_info, filp);
+- if (ret < 0)
+- goto out;
+-
+- return call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+-
+- out:
+- call_usermodehelper_freeinfo(sub_info);
+- return ret;
+-}
+-EXPORT_SYMBOL(call_usermodehelper_pipe);
+-
+ void __init usermodehelper_init(void)
+ {
+ khelper_wq = create_singlethread_workqueue("khelper");
+diff --git a/kernel/sys.c b/kernel/sys.c
+index ce17760..0b8a55e 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1600,9 +1600,9 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
+
+ char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
+
+-static void argv_cleanup(char **argv, char **envp)
++static void argv_cleanup(struct subprocess_info *info)
+ {
+- argv_free(argv);
++ argv_free(info->argv);
+ }
+
+ /**
+@@ -1636,7 +1636,7 @@ int orderly_poweroff(bool force)
+ goto out;
+ }
+
+- call_usermodehelper_setcleanup(info, argv_cleanup);
++ call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
+
+ ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
+
diff --git a/freed-ora/current/F-12/fix-ima-null-ptr-deref.patch b/freed-ora/current/F-12/fix-ima-null-ptr-deref.patch
new file mode 100644
index 000000000..01515f7b4
--- /dev/null
+++ b/freed-ora/current/F-12/fix-ima-null-ptr-deref.patch
@@ -0,0 +1,54 @@
+From: J. R. Okajima <hooanon05@yahoo.co.jp>
+Date: Sun, 7 Feb 2010 06:48:55 +0000 (+1100)
+Subject: ima: fix null pointer deref
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fjmorris%2Fsecurity-testing-2.6.git;a=commitdiff_plain;h=8bb6795424b09db0eca1cccf7a17b93fc28ac7f7
+
+ima: fix null pointer deref
+
+The commit 6c21a7f "LSM: imbed ima calls in the security hooks"
+which moves the ima_file_free() call within security_file_free()
+brought a problem into pipe.c.
+In the error path of pipe(2), the allocated resources are freed by
+path_put() and put_filp() (in this order). Since security_file_free()
+refers f_dentry and ima_file_free() refers f_dentry->d_inode, path_put()
+should be called after put_filp().
+
+Signed-off-by: J. R. Okajima <hooanon05@yahoo.co.jp>
+Signed-off-by: James Morris <jmorris@namei.org>
+---
+
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 37ba29f..90b543d 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -1004,9 +1004,10 @@ struct file *create_write_pipe(int flags)
+
+ void free_write_pipe(struct file *f)
+ {
++ struct path path = f->f_path;
+ free_pipe_info(f->f_dentry->d_inode);
+- path_put(&f->f_path);
+ put_filp(f);
++ path_put(&path);
+ }
+
+ struct file *create_read_pipe(struct file *wrf, int flags)
+@@ -1028,6 +1029,7 @@ int do_pipe_flags(int *fd, int flags)
+ struct file *fw, *fr;
+ int error;
+ int fdw, fdr;
++ struct path path;
+
+ if (flags & ~(O_CLOEXEC | O_NONBLOCK))
+ return -EINVAL;
+@@ -1061,8 +1063,9 @@ int do_pipe_flags(int *fd, int flags)
+ err_fdr:
+ put_unused_fd(fdr);
+ err_read_pipe:
+- path_put(&fr->f_path);
++ path = fr->f_path;
+ put_filp(fr);
++ path_put(&path);
+ err_write_pipe:
+ free_write_pipe(fw);
+ return error;
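
One way to drive pipe(2) into the error path this patch reorders is to starve it of file descriptors; whether the original oops actually reproduces also depends on IMA being compiled in and active. A rough sketch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	int fds[2];

	/*
	 * Shrink the fd limit so pipe(2) runs out of descriptors and takes
	 * the error path the patch reorders (put_filp() before path_put()).
	 * Hitting the original crash additionally requires CONFIG_IMA.
	 */
	if (getrlimit(RLIMIT_NOFILE, &rl) < 0) {
		perror("getrlimit");
		return 1;
	}
	rl.rlim_cur = 4;			/* stdin/stdout/stderr plus one spare slot */
	if (setrlimit(RLIMIT_NOFILE, &rl) < 0) {
		perror("setrlimit");
		return 1;
	}
	while (pipe(fds) == 0)
		;				/* loop until fd allocation fails */
	printf("pipe() failed as expected: %s\n", strerror(errno));
	return 0;
}
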
diff --git a/freed-ora/current/F-12/genkey b/freed-ora/current/F-12/genkey
new file mode 100644
index 000000000..49c6ce8be
--- /dev/null
+++ b/freed-ora/current/F-12/genkey
@@ -0,0 +1,7 @@
+%pubring kernel.pub
+%secring kernel.sec
+Key-Type: DSA
+Key-Length: 512
+Name-Real: Red Hat, Inc.
+Name-Comment: Kernel Module GPG key
+%commit
diff --git a/freed-ora/current/F-12/git-bluetooth.patch b/freed-ora/current/F-12/git-bluetooth.patch
new file mode 100644
index 000000000..8ecc995c2
--- /dev/null
+++ b/freed-ora/current/F-12/git-bluetooth.patch
@@ -0,0 +1,3344 @@
+commit b1fb06830dc870d862f7f80e276130c0ab84d59f
+Author: Wei Yongjun <yjwei@cn.fujitsu.com>
+Date: Wed Feb 25 18:09:33 2009 +0800
+
+ Bluetooth: Remove some pointless conditionals before kfree_skb()
+
+ Remove some pointless conditionals before kfree_skb().
+
+ Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 7585b97a48180f754ebdade1be94092e36bef365
+Author: Wei Yongjun <yjwei@cn.fujitsu.com>
+Date: Wed Feb 25 18:29:52 2009 +0800
+
+ Bluetooth: Remove some pointless conditionals before kfree_skb()
+
+ Remove some pointless conditionals before kfree_skb().
+
+ Signed-off-by: Wei Yongjun <yjwei@cn.fujitsu.com>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 2ae9a6be5f476f3512839a4d11a8f432bfd2914c
+Author: Dave Young <hidave.darkstar@gmail.com>
+Date: Sat Feb 21 16:13:34 2009 +0800
+
+ Bluetooth: Move hci_conn_del_sysfs() back to avoid device destruct too early
+
+ The following commit introduce a regression:
+
+ commit 7d0db0a373195385a2e0b19d1f5e4b186fdcffac
+ Author: Marcel Holtmann <marcel@holtmann.org>
+ Date: Mon Jul 14 20:13:51 2008 +0200
+
+ [Bluetooth] Use a more unique bus name for connections
+
+ I get panic as following (by netconsole):
+
+ [ 2709.344034] usb 5-1: new full speed USB device using uhci_hcd and address 4
+ [ 2709.505776] usb 5-1: configuration #1 chosen from 1 choice
+ [ 2709.569207] Bluetooth: Generic Bluetooth USB driver ver 0.4
+ [ 2709.570169] usbcore: registered new interface driver btusb
+ [ 2845.742781] BUG: unable to handle kernel paging request at 6b6b6c2f
+ [ 2845.742958] IP: [<c015515c>] __lock_acquire+0x6c/0xa80
+ [ 2845.743087] *pde = 00000000
+ [ 2845.743206] Oops: 0002 [#1] SMP
+ [ 2845.743377] last sysfs file: /sys/class/bluetooth/hci0/hci0:6/type
+ [ 2845.743742] Modules linked in: btusb netconsole snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device snd_pcm_oss snd_mixer_oss rfcomm l2cap bluetooth vfat fuse snd_hda_codec_idt snd_hda_intel snd_hda_codec snd_hwdep snd_pcm pl2303 snd_timer psmouse usbserial snd 3c59x e100 serio_raw soundcore i2c_i801 intel_agp mii agpgart snd_page_alloc rtc_cmos rtc_core thermal processor rtc_lib button thermal_sys sg evdev
+ [ 2845.743742]
+ [ 2845.743742] Pid: 0, comm: swapper Not tainted (2.6.29-rc5-smp #54) Dell DM051
+ [ 2845.743742] EIP: 0060:[<c015515c>] EFLAGS: 00010002 CPU: 0
+ [ 2845.743742] EIP is at __lock_acquire+0x6c/0xa80
+ [ 2845.743742] EAX: 00000046 EBX: 00000046 ECX: 6b6b6b6b EDX: 00000002
+ [ 2845.743742] ESI: 6b6b6b6b EDI: 00000000 EBP: c064fd14 ESP: c064fcc8
+ [ 2845.743742] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
+ [ 2845.743742] Process swapper (pid: 0, ti=c064e000 task=c05d1400 task.ti=c064e000)
+ [ 2845.743742] Stack:
+ [ 2845.743742] c05d1400 00000002 c05d1400 00000001 00000002 00000000 f65388dc c05d1400
+ [ 2845.743742] 6b6b6b6b 00000292 c064fd0c c0153732 00000000 00000000 00000001 f700fa50
+ [ 2845.743742] 00000046 00000000 00000000 c064fd40 c0155be6 00000000 00000002 00000001
+ [ 2845.743742] Call Trace:
+ [ 2845.743742] [<c0153732>] ? trace_hardirqs_on_caller+0x72/0x1c0
+ [ 2845.743742] [<c0155be6>] ? lock_acquire+0x76/0xa0
+ [ 2845.743742] [<c03e1aad>] ? skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c046c885>] ? _spin_lock_irqsave+0x45/0x80
+ [ 2845.743742] [<c03e1aad>] ? skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c03e1aad>] ? skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c03e1f94>] ? skb_queue_purge+0x14/0x20
+ [ 2845.743742] [<f8171f5a>] ? hci_conn_del+0x10a/0x1c0 [bluetooth]
+ [ 2845.743742] [<f81399c9>] ? l2cap_disconn_ind+0x59/0xb0 [l2cap]
+ [ 2845.743742] [<f81795ce>] ? hci_conn_del_sysfs+0x8e/0xd0 [bluetooth]
+ [ 2845.743742] [<f8175758>] ? hci_event_packet+0x5f8/0x31c0 [bluetooth]
+ [ 2845.743742] [<c03dfe19>] ? sock_def_readable+0x59/0x80
+ [ 2845.743742] [<c046c14d>] ? _read_unlock+0x1d/0x20
+ [ 2845.743742] [<f8178aa9>] ? hci_send_to_sock+0xe9/0x1d0 [bluetooth]
+ [ 2845.743742] [<c015388b>] ? trace_hardirqs_on+0xb/0x10
+ [ 2845.743742] [<f816fa6a>] ? hci_rx_task+0x2ba/0x490 [bluetooth]
+ [ 2845.743742] [<c0133661>] ? tasklet_action+0x31/0xc0
+ [ 2845.743742] [<c013367c>] ? tasklet_action+0x4c/0xc0
+ [ 2845.743742] [<c0132eb7>] ? __do_softirq+0xa7/0x170
+ [ 2845.743742] [<c0116dec>] ? ack_apic_level+0x5c/0x1c0
+ [ 2845.743742] [<c0132fd7>] ? do_softirq+0x57/0x60
+ [ 2845.743742] [<c01333dc>] ? irq_exit+0x7c/0x90
+ [ 2845.743742] [<c01055bb>] ? do_IRQ+0x4b/0x90
+ [ 2845.743742] [<c01333d5>] ? irq_exit+0x75/0x90
+ [ 2845.743742] [<c010392c>] ? common_interrupt+0x2c/0x34
+ [ 2845.743742] [<c010a14f>] ? mwait_idle+0x4f/0x70
+ [ 2845.743742] [<c0101c05>] ? cpu_idle+0x65/0xb0
+ [ 2845.743742] [<c045731e>] ? rest_init+0x4e/0x60
+ [ 2845.743742] Code: 0f 84 69 02 00 00 83 ff 07 0f 87 1e 06 00 00 85 ff 0f 85 08 05 00 00 8b 4d cc 8b 49 04 85 c9 89 4d d4 0f 84 f7 04 00 00 8b 75 d4 <f0> ff 86 c4 00 00 00 89 f0 e8 56 a9 ff ff 85 c0 0f 85 6e 03 00
+ [ 2845.743742] EIP: [<c015515c>] __lock_acquire+0x6c/0xa80 SS:ESP 0068:c064fcc8
+ [ 2845.743742] ---[ end trace 4c985b38f022279f ]---
+ [ 2845.743742] Kernel panic - not syncing: Fatal exception in interrupt
+ [ 2845.743742] ------------[ cut here ]------------
+ [ 2845.743742] WARNING: at kernel/smp.c:329 smp_call_function_many+0x151/0x200()
+ [ 2845.743742] Hardware name: Dell DM051
+ [ 2845.743742] Modules linked in: btusb netconsole snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device snd_pcm_oss snd_mixer_oss rfcomm l2cap bluetooth vfat fuse snd_hda_codec_idt snd_hda_intel snd_hda_codec snd_hwdep snd_pcm pl2303 snd_timer psmouse usbserial snd 3c59x e100 serio_raw soundcore i2c_i801 intel_agp mii agpgart snd_page_alloc rtc_cmos rtc_core thermal processor rtc_lib button thermal_sys sg evdev
+ [ 2845.743742] Pid: 0, comm: swapper Tainted: G D 2.6.29-rc5-smp #54
+ [ 2845.743742] Call Trace:
+ [ 2845.743742] [<c012e076>] warn_slowpath+0x86/0xa0
+ [ 2845.743742] [<c015041b>] ? trace_hardirqs_off+0xb/0x10
+ [ 2845.743742] [<c0146384>] ? up+0x14/0x40
+ [ 2845.743742] [<c012e661>] ? release_console_sem+0x31/0x1e0
+ [ 2845.743742] [<c046c8ab>] ? _spin_lock_irqsave+0x6b/0x80
+ [ 2845.743742] [<c015041b>] ? trace_hardirqs_off+0xb/0x10
+ [ 2845.743742] [<c046c900>] ? _read_lock_irqsave+0x40/0x80
+ [ 2845.743742] [<c012e7f2>] ? release_console_sem+0x1c2/0x1e0
+ [ 2845.743742] [<c0146384>] ? up+0x14/0x40
+ [ 2845.743742] [<c015041b>] ? trace_hardirqs_off+0xb/0x10
+ [ 2845.743742] [<c046a3d7>] ? __mutex_unlock_slowpath+0x97/0x160
+ [ 2845.743742] [<c046a563>] ? mutex_trylock+0xb3/0x180
+ [ 2845.743742] [<c046a4a8>] ? mutex_unlock+0x8/0x10
+ [ 2845.743742] [<c015b991>] smp_call_function_many+0x151/0x200
+ [ 2845.743742] [<c010a1a0>] ? stop_this_cpu+0x0/0x40
+ [ 2845.743742] [<c015ba61>] smp_call_function+0x21/0x30
+ [ 2845.743742] [<c01137ae>] native_smp_send_stop+0x1e/0x50
+ [ 2845.743742] [<c012e0f5>] panic+0x55/0x110
+ [ 2845.743742] [<c01065a8>] oops_end+0xb8/0xc0
+ [ 2845.743742] [<c010668f>] die+0x4f/0x70
+ [ 2845.743742] [<c011a8c9>] do_page_fault+0x269/0x610
+ [ 2845.743742] [<c011a660>] ? do_page_fault+0x0/0x610
+ [ 2845.743742] [<c046cbaf>] error_code+0x77/0x7c
+ [ 2845.743742] [<c015515c>] ? __lock_acquire+0x6c/0xa80
+ [ 2845.743742] [<c0153732>] ? trace_hardirqs_on_caller+0x72/0x1c0
+ [ 2845.743742] [<c0155be6>] lock_acquire+0x76/0xa0
+ [ 2845.743742] [<c03e1aad>] ? skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c046c885>] _spin_lock_irqsave+0x45/0x80
+ [ 2845.743742] [<c03e1aad>] ? skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c03e1aad>] skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c03e1f94>] skb_queue_purge+0x14/0x20
+ [ 2845.743742] [<f8171f5a>] hci_conn_del+0x10a/0x1c0 [bluetooth]
+ [ 2845.743742] [<f81399c9>] ? l2cap_disconn_ind+0x59/0xb0 [l2cap]
+ [ 2845.743742] [<f81795ce>] ? hci_conn_del_sysfs+0x8e/0xd0 [bluetooth]
+ [ 2845.743742] [<f8175758>] hci_event_packet+0x5f8/0x31c0 [bluetooth]
+ [ 2845.743742] [<c03dfe19>] ? sock_def_readable+0x59/0x80
+ [ 2845.743742] [<c046c14d>] ? _read_unlock+0x1d/0x20
+ [ 2845.743742] [<f8178aa9>] ? hci_send_to_sock+0xe9/0x1d0 [bluetooth]
+ [ 2845.743742] [<c015388b>] ? trace_hardirqs_on+0xb/0x10
+ [ 2845.743742] [<f816fa6a>] hci_rx_task+0x2ba/0x490 [bluetooth]
+ [ 2845.743742] [<c0133661>] ? tasklet_action+0x31/0xc0
+ [ 2845.743742] [<c013367c>] tasklet_action+0x4c/0xc0
+ [ 2845.743742] [<c0132eb7>] __do_softirq+0xa7/0x170
+ [ 2845.743742] [<c0116dec>] ? ack_apic_level+0x5c/0x1c0
+ [ 2845.743742] [<c0132fd7>] do_softirq+0x57/0x60
+ [ 2845.743742] [<c01333dc>] irq_exit+0x7c/0x90
+ [ 2845.743742] [<c01055bb>] do_IRQ+0x4b/0x90
+ [ 2845.743742] [<c01333d5>] ? irq_exit+0x75/0x90
+ [ 2845.743742] [<c010392c>] common_interrupt+0x2c/0x34
+ [ 2845.743742] [<c010a14f>] ? mwait_idle+0x4f/0x70
+ [ 2845.743742] [<c0101c05>] cpu_idle+0x65/0xb0
+ [ 2845.743742] [<c045731e>] rest_init+0x4e/0x60
+ [ 2845.743742] ---[ end trace 4c985b38f02227a0 ]---
+ [ 2845.743742] ------------[ cut here ]------------
+ [ 2845.743742] WARNING: at kernel/smp.c:226 smp_call_function_single+0x8e/0x110()
+ [ 2845.743742] Hardware name: Dell DM051
+ [ 2845.743742] Modules linked in: btusb netconsole snd_seq_dummy snd_seq_oss snd_seq_midi_event snd_seq snd_seq_device snd_pcm_oss snd_mixer_oss rfcomm l2cap bluetooth vfat fuse snd_hda_codec_idt snd_hda_intel snd_hda_codec snd_hwdep snd_pcm pl2303 snd_timer psmouse usbserial snd 3c59x e100 serio_raw soundcore i2c_i801 intel_agp mii agpgart snd_page_alloc rtc_cmos rtc_core thermal processor rtc_lib button thermal_sys sg evdev
+ [ 2845.743742] Pid: 0, comm: swapper Tainted: G D W 2.6.29-rc5-smp #54
+ [ 2845.743742] Call Trace:
+ [ 2845.743742] [<c012e076>] warn_slowpath+0x86/0xa0
+ [ 2845.743742] [<c012e000>] ? warn_slowpath+0x10/0xa0
+ [ 2845.743742] [<c015041b>] ? trace_hardirqs_off+0xb/0x10
+ [ 2845.743742] [<c0146384>] ? up+0x14/0x40
+ [ 2845.743742] [<c012e661>] ? release_console_sem+0x31/0x1e0
+ [ 2845.743742] [<c046c8ab>] ? _spin_lock_irqsave+0x6b/0x80
+ [ 2845.743742] [<c015041b>] ? trace_hardirqs_off+0xb/0x10
+ [ 2845.743742] [<c046c900>] ? _read_lock_irqsave+0x40/0x80
+ [ 2845.743742] [<c012e7f2>] ? release_console_sem+0x1c2/0x1e0
+ [ 2845.743742] [<c0146384>] ? up+0x14/0x40
+ [ 2845.743742] [<c015b7be>] smp_call_function_single+0x8e/0x110
+ [ 2845.743742] [<c010a1a0>] ? stop_this_cpu+0x0/0x40
+ [ 2845.743742] [<c026d23f>] ? cpumask_next_and+0x1f/0x40
+ [ 2845.743742] [<c015b95a>] smp_call_function_many+0x11a/0x200
+ [ 2845.743742] [<c010a1a0>] ? stop_this_cpu+0x0/0x40
+ [ 2845.743742] [<c015ba61>] smp_call_function+0x21/0x30
+ [ 2845.743742] [<c01137ae>] native_smp_send_stop+0x1e/0x50
+ [ 2845.743742] [<c012e0f5>] panic+0x55/0x110
+ [ 2845.743742] [<c01065a8>] oops_end+0xb8/0xc0
+ [ 2845.743742] [<c010668f>] die+0x4f/0x70
+ [ 2845.743742] [<c011a8c9>] do_page_fault+0x269/0x610
+ [ 2845.743742] [<c011a660>] ? do_page_fault+0x0/0x610
+ [ 2845.743742] [<c046cbaf>] error_code+0x77/0x7c
+ [ 2845.743742] [<c015515c>] ? __lock_acquire+0x6c/0xa80
+ [ 2845.743742] [<c0153732>] ? trace_hardirqs_on_caller+0x72/0x1c0
+ [ 2845.743742] [<c0155be6>] lock_acquire+0x76/0xa0
+ [ 2845.743742] [<c03e1aad>] ? skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c046c885>] _spin_lock_irqsave+0x45/0x80
+ [ 2845.743742] [<c03e1aad>] ? skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c03e1aad>] skb_dequeue+0x1d/0x70
+ [ 2845.743742] [<c03e1f94>] skb_queue_purge+0x14/0x20
+ [ 2845.743742] [<f8171f5a>] hci_conn_del+0x10a/0x1c0 [bluetooth]
+ [ 2845.743742] [<f81399c9>] ? l2cap_disconn_ind+0x59/0xb0 [l2cap]
+ [ 2845.743742] [<f81795ce>] ? hci_conn_del_sysfs+0x8e/0xd0 [bluetooth]
+ [ 2845.743742] [<f8175758>] hci_event_packet+0x5f8/0x31c0 [bluetooth]
+ [ 2845.743742] [<c03dfe19>] ? sock_def_readable+0x59/0x80
+ [ 2845.743742] [<c046c14d>] ? _read_unlock+0x1d/0x20
+ [ 2845.743742] [<f8178aa9>] ? hci_send_to_sock+0xe9/0x1d0 [bluetooth]
+ [ 2845.743742] [<c015388b>] ? trace_hardirqs_on+0xb/0x10
+ [ 2845.743742] [<f816fa6a>] hci_rx_task+0x2ba/0x490 [bluetooth]
+ [ 2845.743742] [<c0133661>] ? tasklet_action+0x31/0xc0
+ [ 2845.743742] [<c013367c>] tasklet_action+0x4c/0xc0
+ [ 2845.743742] [<c0132eb7>] __do_softirq+0xa7/0x170
+ [ 2845.743742] [<c0116dec>] ? ack_apic_level+0x5c/0x1c0
+ [ 2845.743742] [<c0132fd7>] do_softirq+0x57/0x60
+ [ 2845.743742] [<c01333dc>] irq_exit+0x7c/0x90
+ [ 2845.743742] [<c01055bb>] do_IRQ+0x4b/0x90
+ [ 2845.743742] [<c01333d5>] ? irq_exit+0x75/0x90
+ [ 2845.743742] [<c010392c>] common_interrupt+0x2c/0x34
+ [ 2845.743742] [<c010a14f>] ? mwait_idle+0x4f/0x70
+ [ 2845.743742] [<c0101c05>] cpu_idle+0x65/0xb0
+ [ 2845.743742] [<c045731e>] rest_init+0x4e/0x60
+ [ 2845.743742] ---[ end trace 4c985b38f02227a1 ]---
+ [ 2845.743742] Rebooting in 3 seconds..
+
+ My Logitec Bluetooth mouse keeps trying to connect to the PC, but the
+ PC side rejects the connection again and again; then the panic happens.
+
+ The reason is that hci_conn_del_sysfs is now called in hci_event_packet;
+ the del work is done in a workqueue, so it can complete before
+ skb_queue_purge is called.
+
+ I move hci_conn_del_sysfs after skb_queue_purge, just as it was before
+ Marcel's commit.
+
+ Remove the hci_conn_del_sysfs call in hci_conn_hash_flush as well, since
+ hci_conn_del now deals with it.
+
+ Signed-off-by: Dave Young <hidave.darkstar@gmail.com>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 2526d3d8b2f671a7d36cc486af984052cd5a690f
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Feb 20 20:54:06 2009 +0100
+
+ Bluetooth: Permit BT_SECURITY also for L2CAP raw sockets
+
+ Userspace pairing code can be simplified if it doesn't have to fall
+ back to using L2CAP_LM in the case of L2CAP raw sockets. This patch
+ allows the BT_SECURITY socket option to be used for these sockets.
+
+ Signed-off-by: Johan Hedberg <johan.hedberg@nokia.com>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
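+
+ For illustration, a minimal userspace sketch of the simplification this
+ enables might look as follows (assuming the bt_security structure and the
+ BT_SECURITY and SOL_BLUETOOTH constants from this patch set are exported
+ through the installed Bluetooth headers):
+
+     #include <string.h>
+     #include <sys/socket.h>
+     #include <bluetooth/bluetooth.h>
+     #include <bluetooth/l2cap.h>
+
+     /* Request MEDIUM security on an L2CAP raw socket. With this patch
+      * the kernel accepts BT_SECURITY here as well, so falling back to
+      * the old L2CAP_LM option is no longer necessary. */
+     static int set_raw_security(int sk)
+     {
+             struct bt_security sec;
+
+             memset(&sec, 0, sizeof(sec));
+             sec.level = BT_SECURITY_MEDIUM;
+
+             return setsockopt(sk, SOL_BLUETOOTH, BT_SECURITY,
+                               &sec, sizeof(sec));
+     }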
+
+commit 37e62f5516cfb210e64fe53457932df4341b0ad1
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Tue Feb 17 21:49:33 2009 +0100
+
+ Bluetooth: Fix RFCOMM usage of in-kernel L2CAP sockets
+
+ The CID value of L2CAP sockets needs to be set to zero. All userspace
+ applications do this via memset() on the sockaddr_l2 structure. The
+ RFCOMM implementation uses in-kernel L2CAP sockets and so it has to
+ make sure that l2_cid is set to zero.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
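+
+ A sketch of the userspace convention referred to above (assuming the
+ usual BlueZ helpers htobs() and bacpy() from <bluetooth/bluetooth.h>);
+ zeroing the whole structure guarantees that l2_cid, and any field added
+ in the future, starts out as 0:
+
+     #include <stdint.h>
+     #include <string.h>
+     #include <sys/socket.h>
+     #include <bluetooth/bluetooth.h>
+     #include <bluetooth/l2cap.h>
+
+     /* Zero the whole address first so l2_cid (and any future field)
+      * is guaranteed to be 0, just like the in-kernel users must do. */
+     static int l2cap_connect_psm(int sk, const bdaddr_t *dst, uint16_t psm)
+     {
+             struct sockaddr_l2 addr;
+
+             memset(&addr, 0, sizeof(addr));
+             addr.l2_family = AF_BLUETOOTH;
+             addr.l2_psm = htobs(psm);
+             bacpy(&addr.l2_bdaddr, dst);
+
+             return connect(sk, (struct sockaddr *) &addr, sizeof(addr));
+     }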
+
+commit 2a517ca687232adc8f14893730644da712010ffc
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Mon Feb 16 03:20:31 2009 +0100
+
+ Bluetooth: Disallow usage of L2CAP CID setting for now
+
+ In the future the L2CAP layer will have full support for fixed channels
+ and right now it already can export the channel assignment, but for the
+ functions bind() and connect() only the usage of CID 0 is allowed. This
+ allows easy detection of whether the kernel supports fixed channels,
+ because otherwise it would be impossible for an application to tell.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 8bf4794174659b06d43cc5e290cd384757374613
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Mon Feb 16 02:59:49 2009 +0100
+
+ Bluetooth: Change RFCOMM to use BT_CONNECT2 for BT_DEFER_SETUP
+
+ When BT_DEFER_SETUP is enabled on an RFCOMM socket, switch its
+ current state from BT_OPEN to BT_CONNECT2. This gives the Bluetooth
+ core a unified way to handle L2CAP and RFCOMM sockets. The BT_CONNECT2
+ state is designated for incoming connections.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit d5f2d2be68876f65dd051b978a7b66265fde9ffd
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Mon Feb 16 02:57:30 2009 +0100
+
+ Bluetooth: Fix poll() misbehavior when using BT_DEFER_SETUP
+
+ When BT_DEFER_SETUP has been enabled on a Bluetooth socket it keeps
+ signaling POLLIN all the time. This is wrong behavior. POLLIN
+ should only be signaled if the client socket is in the BT_CONNECT2 state
+ and the parent has BT_DEFER_SETUP enabled.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
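+
+ A sketch of the intended server-side usage, a hypothetical helper that
+ waits on a listening socket which already has BT_DEFER_SETUP enabled:
+
+     #include <poll.h>
+     #include <sys/socket.h>
+
+     /* With the fix, POLLIN on the parent only fires once a pending client
+      * socket is in BT_CONNECT2 and deferred setup is enabled, instead of
+      * being signaled all the time. */
+     static int wait_for_incoming(int srv)
+     {
+             struct pollfd pfd = { .fd = srv, .events = POLLIN };
+
+             if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLIN))
+                     return -1;
+
+             return accept(srv, NULL, NULL);
+     }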
+
+commit 96a3183322cba1a2846771b067c99b9d6f481263
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Feb 12 16:23:03 2009 +0100
+
+ Bluetooth: Set authentication requirement before requesting it
+
+ The authentication requirement only got updated when the security level
+ increased. This is wrong behavior. The authentication requirement is
+ read by the Bluetooth daemon to make proper decisions when handling the
+ IO capabilities exchange. So set the value that is currently expected by
+ the higher layers like L2CAP and RFCOMM.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 00ae4af91d8c5b6814e2bb3bfaaf743845f989eb
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Feb 12 16:19:45 2009 +0100
+
+ Bluetooth: Fix authentication requirements for L2CAP security check
+
+ The L2CAP layer can trigger the authentication via an ACL connection or
+ later on to increase the security level. When increasing the security
+ level it didn't use the same authentication requirements as when triggering
+ a new ACL connection. Make sure that exactly the same authentication
+ requirements are used. The only exception here are the L2CAP raw sockets
+ which are only used for dedicated bonding.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 2950f21acb0f6b8fcd964485c2ebf1e06545ac20
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Feb 12 14:02:50 2009 +0100
+
+ Bluetooth: Ask upper layers for HCI disconnect reason
+
+ Some of the qualification tests demand that in case of failures in L2CAP
+ the HCI disconnect should indicate a reason why L2CAP fails. This is a
+ blunt layer violation since multiple L2CAP connections could be using
+ the same ACL and thus forcing a disconnect reason is not a good idea.
+
+ To comply with the Bluetooth test specification, the disconnect reason
+ is now stored in the L2CAP connection structure and every time a new
+ L2CAP channel is added it will be set back to its default. So only in the
+ case where the L2CAP channel with the disconnect reason is really the
+ last one will it be propagated to the HCI layer.
+
+ The HCI layer has been extended with a disconnect indication that allows
+ it to ask upper layers for a disconnect reason. The upper layer is not
+ required to support this callback; in that case it will nicely default to the
+ existing behavior. If an upper layer like L2CAP can provide a disconnect
+ reason that one will be used to disconnect the ACL or SCO link.
+
+ No modification to the ACL disconnect timeout has been made. So in case
+ of Linux to Linux connection the initiator will disconnect the ACL link
+ before the acceptor side can signal the specific disconnect reason. That
+ is perfectly fine since Linux doesn't make use of this value anyway. The
+ L2CAP layer has a perfectly valid error code for rejecting connections due
+ to a security violation. It is unclear why the Bluetooth specification
+ insists on having a specific HCI disconnect reason.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit f29972de8e7476706ab3c01304a505e7c95d9040
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Feb 12 05:07:45 2009 +0100
+
+ Bluetooth: Add CID field to L2CAP socket address structure
+
+ In preparation for L2CAP fixed channel support, the CID value of an
+ L2CAP connection needs to be accessible via the socket interface. The
+ CID is the connection identifier and exists as a source and a destination
+ value. So extend the L2CAP socket address structure with this field and
+ change getsockname() and getpeername() to fill it in.
+
+ The bind() and connect() functions have been modified to handle L2CAP
+ socket address structures of variable sizes. This makes them future
+ proof if additional fields need to be added.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
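+
+ A short sketch of reading the new field back, assuming the usual BlueZ
+ byte-order helper btohs() is available:
+
+     #include <stdio.h>
+     #include <string.h>
+     #include <sys/socket.h>
+     #include <bluetooth/bluetooth.h>
+     #include <bluetooth/l2cap.h>
+
+     /* After the connection is set up, getsockname() and getpeername()
+      * also report the local and remote channel identifiers via l2_cid. */
+     static void print_local_cid(int sk)
+     {
+             struct sockaddr_l2 addr;
+             socklen_t len = sizeof(addr);
+
+             memset(&addr, 0, sizeof(addr));
+             if (getsockname(sk, (struct sockaddr *) &addr, &len) == 0)
+                     printf("local CID 0x%4.4x\n", btohs(addr.l2_cid));
+     }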
+
+commit e1027a7c69700301d14db03d2e049ee60c4f92df
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Mon Feb 9 09:18:02 2009 +0100
+
+ Bluetooth: Request L2CAP fixed channel list if available
+
+ If the extended features mask indicates support for fixed channels,
+ request the list of available fixed channels. This also enables the
+ fixed channel features bit so remote implementations can request
+ information about it. Currently only the signal channel will be
+ listed.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 435fef20acfc48f46476abad55b0cd3aa47b8365
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Mon Feb 9 03:55:28 2009 +0100
+
+ Bluetooth: Don't enforce authentication for L2CAP PSM 1 and 3
+
+ The recommendation for the L2CAP PSM 1 (SDP) is to not use any kind
+ of authentication or encryption. So don't trigger authentication
+ for incoming and outgoing SDP connections.
+
+ For L2CAP PSM 3 (RFCOMM) there is no clear requirement, but with
+ Bluetooth 2.1 the initiator is required to enable authentication
+ and encryption first and this gets enforced. So there is no need
+ to trigger an additional authentication step. The RFCOMM service
+ security will make sure that a secure enough link key is present.
+
+ When the encryption gets enabled after the SDP connection setup,
+ then switch the security level from SDP to low security.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 6a8d3010b313d99adbb28f1826fac0234395bb26
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Feb 6 23:56:36 2009 +0100
+
+ Bluetooth: Fix double L2CAP connection request
+
+ If the remote L2CAP server uses the authentication pending stage and
+ encryption is enabled, it can happen that an L2CAP connection request is
+ sent twice due to a race condition in the connection state machine.
+
+ When the remote side indicates any kind of connection pending, then
+ track this state and skip sending of L2CAP commands for this period.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 984947dc64f82bc6cafa4d84ba1a139718f634a8
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Feb 6 23:35:19 2009 +0100
+
+ Bluetooth: Fix race condition with L2CAP information request
+
+ When two L2CAP connections are requested quickly after the ACL link has
+ been established there exists a window for a race condition where a
+ connection request is sent before the information response has been
+ received. Any connection request should only be sent after an exchange
+ of the extended features mask has been finished.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 657e17b03c80bec817975984d221bef716f83558
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Feb 6 19:45:36 2009 +0100
+
+ Bluetooth: Set authentication requirements if not available
+
+ When no authentication requirements are selected, but an outgoing or
+ incoming connection has requested any kind of security enforcement,
+ then set these authentication requirements.
+
+ This ensures that userspace always gets informed about the
+ authentication requirements (if available). Only when no security
+ enforcement has happened will the kernel signal invalid requirements.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 0684e5f9fb9e3f7e168ab831dfca693bcb44805b
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Mon Feb 9 02:48:38 2009 +0100
+
+ Bluetooth: Use general bonding whenever possible
+
+ When receiving incoming connections to specific services, always use
+ general bonding. This ensures that the link key gets stored and can be
+ used for further authentications.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit efc7688b557dd1be10eead7399b315efcb1dbc74
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Feb 6 09:13:37 2009 +0100
+
+ Bluetooth: Add SCO fallback for eSCO connection attempts
+
+ When attempting to setup eSCO connections it can happen that some link
+ manager implementations fail to properly negotiate the eSCO parameters
+ and thus fail the eSCO setup. Normally the link manager is responsible
+ for the negotiation of the parameters and actually fallback to SCO if
+ no agreement can be reached. In cases where the link manager is just too
+ stupid, then at least try to establish a SCO link if eSCO fails.
+
+ For the Bluetooth devices with EDR support this includes handling packet
+ types of EDR basebands. This is particular tricky since for the EDR the
+ logic of enabling/disabling one specific packet type is turned around.
+ This fix contains an extra bitmask to disable eSCO EDR packet when
+ trying to fallback to a SCO connection.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 255c76014af74165428e7aa16414b857e2bdccf2
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Wed Feb 4 21:07:19 2009 +0100
+
+ Bluetooth: Don't check encryption for L2CAP raw sockets
+
+ For L2CAP sockets with medium and high security requirement a missing
+ encryption will enforce the closing of the link. For the L2CAP raw
+ sockets this is not needed, so skip that check.
+
+ This fixes a crash when pairing Bluetooth 2.0 (and earlier) devices
+ since the L2CAP state machine got confused and then locked up the whole
+ system.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 43c2e57f94c15744495fee564610aa24602b3824
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Wed Feb 4 17:41:38 2009 +0100
+
+ Bluetooth: Submit bulk URBs along with interrupt URBs
+
+ Submitting the bulk URBs for ACL data transfers only on demand has no
+ real benefit compared to just submitting them when a Bluetooth device gets
+ opened. So when submitting the interrupt URBs for HCI events, just
+ submit the bulk URBs, too.
+
+ This solves a problem with some Bluetooth USB dongles that has been
+ reported over the last few months. These devices require that the bulk
+ URBs are actually present. These devices are really broken, but there
+ is nothing we can do about it.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 6e1031a40029492c10509e8c3dcac9b611438ccb
+Author: Jaikumar Ganesh <jaikumar@google.com>
+Date: Mon Feb 2 18:03:57 2009 -0800
+
+ Bluetooth: When encryption is dropped, do not send RFCOMM packets
+
+ During a role change with pre-Bluetooth 2.1 devices, the remote side drops
+ the encryption of the RFCOMM connection. We allow a grace period for the
+ encryption to be re-established, before dropping the connection. During
+ this grace period, the RFCOMM_SEC_PENDING flag is set. Check this flag
+ before sending RFCOMM packets.
+
+ Signed-off-by: Jaikumar Ganesh <jaikumar@google.com>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 34a55eda483e8177c9044f93fd2c9107f02bf1c7
+Author: Andre Haupt <andre@bitwigglers.org>
+Date: Mon Feb 2 14:45:11 2009 -0800
+
+ Bluetooth: Eliminate a sparse warning in bt3c driver
+
+ This eliminates a sparse warning that symbol 'stat' shadows an earlier one.
+
+ Signed-off-by: Andre Haupt <andre@bitwigglers.org>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit dd2efd03b49d56ae795c71335bc7358022514c32
+Author: Dave Young <hidave.darkstar@gmail.com>
+Date: Sat Jan 31 13:51:15 2009 +0800
+
+ Bluetooth: Remove CONFIG_DEBUG_LOCK_ALLOC ifdefs
+
+ Due to lockdep changes, the CONFIG_DEBUG_LOCK_ALLOC ifdef is not needed
+ now. So just remove it here.
+
+ The following commit fixed the !lockdep build warnings:
+
+ commit e8f6fbf62de37cbc2e179176ac7010d5f4396b67
+ Author: Ingo Molnar <mingo@elte.hu>
+ Date: Wed Nov 12 01:38:36 2008 +0000
+
+ lockdep: include/linux/lockdep.h - fix warning in net/bluetooth/af_bluetooth.c
+
+ Signed-off-by: Dave Young <hidave.darkstar@gmail.com>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 5f9018af004fa8635bbbe3ab2dc61e8a686edfaa
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Jan 16 10:09:50 2009 +0100
+
+ Bluetooth: Update version numbers
+
+ With the support for the enhanced security model and the support for
+ deferring connection setup, it is a good idea to increase various
+ version numbers.
+
+ This is purely cosmetic and has no effect on the behavior, but can
+ be really helpful when debugging problems in different kernel versions.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 0588d94fd7e414367a7ae517569d2222441c255f
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Jan 16 10:06:13 2009 +0100
+
+ Bluetooth: Restrict application of socket options
+
+ The new socket options should only be evaluated for SOL_BLUETOOTH level
+ and not for every other level. Previously this caused some minor issues
+ when detecting if a kernel with certain features is available.
+
+ Also restrict BT_SECURITY to SOCK_SEQPACKET for L2CAP and SOCK_STREAM for
+ the RFCOMM protocol.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit f62e4323ab43c59e7cd7f72c1eb392d7c767ce5a
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:58:44 2009 +0100
+
+ Bluetooth: Disconnect L2CAP connections without encryption
+
+ For L2CAP connections with high security setting, the link will be
+ immediately dropped when the encryption gets disabled. For L2CAP
+ connections with medium security there will be a grace period where
+ the remote device has the chance to re-enable encryption. If that
+ doesn't happen, the link will also be disconnected.
+
+ The requirement for the grace period with medium security comes from
+ Bluetooth 2.0 and earlier devices that require role switching.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 8c84b83076b5062f59b6167cdda90d9e5124aa71
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri Jan 16 08:17:51 2009 +0100
+
+ Bluetooth: Pause RFCOMM TX when encryption drops
+
+ A role switch with devices following the Bluetooth pre-2.1 standards
+ or without Encryption Pause and Resume support is not possible if
+ encryption is enabled. Most newer headsets require the role switch,
+ but also require that the connection is encrypted.
+
+ For connections with a high security mode setting, the link will be
+ immediately dropped. When the connection uses medium security mode
+ setting, then a grace period is introduced where the TX is halted and
+ the remote device gets a chance to re-enable encryption after the
+ role switch. If it is not re-enabled, the link will be dropped.
+
+ Based on initial work by Ville Tervo <ville.tervo@nokia.com>
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 9f2c8a03fbb3048cf38b158f87aa0c3c09bca084
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:58:40 2009 +0100
+
+ Bluetooth: Replace RFCOMM link mode with security level
+
+ Change the RFCOMM internals to use the new security levels and remove
+ the link mode details.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 2af6b9d518ddfbc4d6990d5f9c9b1a05341c1cef
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:58:38 2009 +0100
+
+ Bluetooth: Replace L2CAP link mode with security level
+
+ Change the L2CAP internals to use the new security levels and remove
+ the link mode details.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 8c1b235594fbab9a13240a1dac12ea9fd99b6440
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:58:04 2009 +0100
+
+ Bluetooth: Add enhanced security model for Simple Pairing
+
+ The current security model is based around the flags AUTH, ENCRYPT and
+ SECURE. Starting with support for the Bluetooth 2.1 specification this is
+ no longer sufficient. The different security levels are now defined as
+ SDP, LOW, MEDIUM and SECURE.
+
+ Previously it was possible to set each security flag independently, but this
+ actually doesn't make a lot of sense. For Bluetooth the encryption depends
+ on a previous successful authentication. Also you can only update your
+ existing link key if you successfully created at least one before. And of
+ course the update of link keys without having proper encryption in place
+ is a security issue.
+
+ The new security levels from the Bluetooth 2.1 specification are now
+ used internally. All old settings are mapped to the new values and this
+ way it ensures that old applications still work. The only limitation
+ is that it is no longer possible to set authentication without also
+ enabling encryption. No application should have done this anyway since
+ this is actually a security issue. Without encryption the integrity of
+ the authentication can't be guaranteed.
+
+ As the default for a new L2CAP or RFCOMM connection, the LOW security level
+ is used. The only exception here are the service discovery sessions on
+ PSM 1, where the SDP level is used. To have similar security strength as with
+ a combination key from Bluetooth 2.0 and earlier, the MEDIUM level should be
+ used. This is according to the Bluetooth specification. The MEDIUM level
+ will not require any kind of man-in-the-middle (MITM) protection. Only
+ the HIGH security level will require this.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit c89b6e6bda4c8021195778f47567d0cc9dbfe7ec
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:57:03 2009 +0100
+
+ Bluetooth: Fix SCO state handling for incoming connections
+
+ When the remote device supports only SCO connections, on receipt of
+ the HCI_EV_CONN_COMPLETE event packet, the connect state is changed to
+ BT_CONNECTED, but the socket state is not updated. Hence, the connect()
+ call times out even though the SCO connection has been successfully
+ established.
+
+ Based on a report by Jaikumar Ganesh <jaikumar@google.com>
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 71aeeaa1fd88fe7446391e0553336f0e0c2cfe6a
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:57:02 2009 +0100
+
+ Bluetooth: Reject incoming SCO connections without listeners
+
+ All SCO and eSCO connections are auto-accepted no matter whether there is a
+ corresponding listening socket for them. This patch changes this so that
+ connection requests for SCO and eSCO without any socket are rejected.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit f66dc81f44d918ee1aa1a9d821bb2f25c7592bc0
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:57:00 2009 +0100
+
+ Bluetooth: Add support for deferring L2CAP connection setup
+
+ In order to decide if listening L2CAP sockets should be accept()ed
+ the BD_ADDR of the remote device needs to be known. This patch adds
+ a socket option which defines a timeout for deferring the actual
+ connection setup.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit bb23c0ab824653be4aa7dfca15b07b3059717004
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:56:48 2009 +0100
+
+ Bluetooth: Add support for deferring RFCOMM connection setup
+
+ In order to decide if listening RFCOMM sockets should be accept()ed
+ the BD_ADDR of the remote device needs to be known. This patch adds
+ a socket option which defines a timeout for deferring the actual
+ connection setup.
+
+ The connection setup is done after reading from the socket for the
+ first time. Until then writing to the socket returns ENOTCONN.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit c4f912e155504e94dd4f3d63c378dab0ff03dbda
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:52:16 2009 +0100
+
+ Bluetooth: Add global deferred socket parameter
+
+ The L2CAP and RFCOMM applications require support for authorization
+ and the ability to reject incoming connection requests. The socket
+ interface is not really able to support this.
+
+ This patch does the ground work for a socket option to defer connection
+ setup. Setting this option allows calling accept(), and then the
+ first read() will trigger the final connection setup. Calling close()
+ would reject the connection.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
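+
+ A sketch of the accept/read/close flow this ground work enables;
+ BT_DEFER_SETUP and SOL_BLUETOOTH are the constants introduced by this
+ patch set (assumed to be visible to userspace), and authorize_peer()
+ is a hypothetical policy check supplied by the application:
+
+     #include <stdint.h>
+     #include <unistd.h>
+     #include <sys/socket.h>
+     #include <bluetooth/bluetooth.h>
+
+     static int authorize_peer(int sk);       /* hypothetical policy check */
+
+     static void handle_incoming(int srv)
+     {
+             uint32_t defer = 1;
+             char buf[16];
+             int nsk;
+
+             /* Ask the kernel to defer the final connection setup
+              * (normally done once when the listener is created). */
+             setsockopt(srv, SOL_BLUETOOTH, BT_DEFER_SETUP,
+                        &defer, sizeof(defer));
+
+             nsk = accept(srv, NULL, NULL);   /* remote BD_ADDR known now */
+             if (nsk < 0)
+                     return;
+
+             if (authorize_peer(nsk)) {
+                     /* The first read() triggers the final setup. */
+                     read(nsk, buf, sizeof(buf));
+             } else {
+                     /* Closing the socket instead rejects the connection. */
+                     close(nsk);
+             }
+     }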
+
+commit d58daf42d29a3a4a4d4be46cf47ceee096789680
+Author: Marcel Holtmann <marcel@holtmann.org>
+Date: Thu Jan 15 21:52:14 2009 +0100
+
+ Bluetooth: Preparation for usage of SOL_BLUETOOTH
+
+ The socket option levels SOL_L2CAP, SOL_RFCOMM and SOL_SCO are currently
+ in use by various Bluetooth applications. Going forward the common
+ option level SOL_BLUETOOTH should be used. This patch prepares the clean
+ split of the old and new option levels while keeping everything backward
+ compatible.
+
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+commit 91aa35a5aa3540223066bf6b51c935418c63a35d
+Author: Victor Shcherbatyuk <victor.shcherbatyuk@tomtom.com>
+Date: Thu Jan 15 21:52:12 2009 +0100
+
+ Bluetooth: Fix issue with return value of rfcomm_sock_sendmsg()
+
+ In case of connection failures, rfcomm_sock_sendmsg() should return
+ an error and not a 0 value.
+
+ Signed-off-by: Victor Shcherbatyuk <victor.shcherbatyuk@tomtom.com>
+ Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+
+diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
+index d3f14be..2a00707 100644
+--- a/drivers/bluetooth/bfusb.c
++++ b/drivers/bluetooth/bfusb.c
+@@ -257,8 +257,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
+
+ if (hdr & 0x10) {
+ BT_ERR("%s error in block", data->hdev->name);
+- if (data->reassembly)
+- kfree_skb(data->reassembly);
++ kfree_skb(data->reassembly);
+ data->reassembly = NULL;
+ return -EIO;
+ }
+diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
+index ff195c2..d58e22b 100644
+--- a/drivers/bluetooth/bt3c_cs.c
++++ b/drivers/bluetooth/bt3c_cs.c
+@@ -359,9 +359,9 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
+ BT_ERR("Very strange (stat=0x%04x)", stat);
+ } else if ((stat & 0xff) != 0xff) {
+ if (stat & 0x0020) {
+- int stat = bt3c_read(iobase, 0x7002) & 0x10;
++ int status = bt3c_read(iobase, 0x7002) & 0x10;
+ BT_INFO("%s: Antenna %s", info->hdev->name,
+- stat ? "out" : "in");
++ status ? "out" : "in");
+ }
+ if (stat & 0x0001)
+ bt3c_receive(info);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index b5fbda6..e70c57e 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -35,7 +35,7 @@
+ #include <net/bluetooth/bluetooth.h>
+ #include <net/bluetooth/hci_core.h>
+
+-#define VERSION "0.4"
++#define VERSION "0.5"
+
+ static int ignore_dga;
+ static int ignore_csr;
+@@ -171,6 +171,7 @@ struct btusb_data {
+
+ __u8 cmdreq_type;
+
++ unsigned int sco_num;
+ int isoc_altsetting;
+ int suspend_count;
+ };
+@@ -496,11 +497,23 @@ static int btusb_open(struct hci_dev *hdev)
+ return 0;
+
+ err = btusb_submit_intr_urb(hdev, GFP_KERNEL);
++ if (err < 0)
++ goto failed;
++
++ err = btusb_submit_bulk_urb(hdev, GFP_KERNEL);
+ if (err < 0) {
+- clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+- clear_bit(HCI_RUNNING, &hdev->flags);
++ usb_kill_anchored_urbs(&data->intr_anchor);
++ goto failed;
+ }
+
++ set_bit(BTUSB_BULK_RUNNING, &data->flags);
++ btusb_submit_bulk_urb(hdev, GFP_KERNEL);
++
++ return 0;
++
++failed:
++ clear_bit(BTUSB_INTR_RUNNING, &data->flags);
++ clear_bit(HCI_RUNNING, &hdev->flags);
+ return err;
+ }
+
+@@ -655,19 +668,10 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
+
+ BT_DBG("%s evt %d", hdev->name, evt);
+
+- if (hdev->conn_hash.acl_num > 0) {
+- if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) {
+- if (btusb_submit_bulk_urb(hdev, GFP_ATOMIC) < 0)
+- clear_bit(BTUSB_BULK_RUNNING, &data->flags);
+- else
+- btusb_submit_bulk_urb(hdev, GFP_ATOMIC);
+- }
+- } else {
+- clear_bit(BTUSB_BULK_RUNNING, &data->flags);
+- usb_unlink_anchored_urbs(&data->bulk_anchor);
++ if (hdev->conn_hash.sco_num != data->sco_num) {
++ data->sco_num = hdev->conn_hash.sco_num;
++ schedule_work(&data->work);
+ }
+-
+- schedule_work(&data->work);
+ }
+
+ static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+@@ -982,9 +986,11 @@ static int btusb_resume(struct usb_interface *intf)
+ }
+
+ if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) {
+- if (btusb_submit_bulk_urb(hdev, GFP_NOIO) < 0)
++ err = btusb_submit_bulk_urb(hdev, GFP_NOIO);
++ if (err < 0) {
+ clear_bit(BTUSB_BULK_RUNNING, &data->flags);
+- else
++ return err;
++ } else
+ btusb_submit_bulk_urb(hdev, GFP_NOIO);
+ }
+
+diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
+index b0fafb0..c0ce813 100644
+--- a/drivers/bluetooth/hci_h4.c
++++ b/drivers/bluetooth/hci_h4.c
+@@ -102,8 +102,7 @@ static int h4_close(struct hci_uart *hu)
+
+ skb_queue_purge(&h4->txq);
+
+- if (h4->rx_skb)
+- kfree_skb(h4->rx_skb);
++ kfree_skb(h4->rx_skb);
+
+ hu->priv = NULL;
+ kfree(h4);
+diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
+index b91d45a..5c65014 100644
+--- a/drivers/bluetooth/hci_ll.c
++++ b/drivers/bluetooth/hci_ll.c
+@@ -163,8 +163,7 @@ static int ll_close(struct hci_uart *hu)
+ skb_queue_purge(&ll->tx_wait_q);
+ skb_queue_purge(&ll->txq);
+
+- if (ll->rx_skb)
+- kfree_skb(ll->rx_skb);
++ kfree_skb(ll->rx_skb);
+
+ hu->priv = NULL;
+
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index a04f846..3ad5390 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -53,6 +53,17 @@
+ #define SOL_SCO 17
+ #define SOL_RFCOMM 18
+
++#define BT_SECURITY 4
++struct bt_security {
++ __u8 level;
++};
++#define BT_SECURITY_SDP 0
++#define BT_SECURITY_LOW 1
++#define BT_SECURITY_MEDIUM 2
++#define BT_SECURITY_HIGH 3
++
++#define BT_DEFER_SETUP 7
++
+ #define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
+ #define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
+ #define BT_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg)
+@@ -108,6 +119,7 @@ struct bt_sock {
+ bdaddr_t dst;
+ struct list_head accept_q;
+ struct sock *parent;
++ u32 defer_setup;
+ };
+
+ struct bt_sock_list {
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 3645139..f69f015 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -133,8 +133,13 @@ enum {
+ #define ESCO_EV3 0x0008
+ #define ESCO_EV4 0x0010
+ #define ESCO_EV5 0x0020
++#define ESCO_2EV3 0x0040
++#define ESCO_3EV3 0x0080
++#define ESCO_2EV5 0x0100
++#define ESCO_3EV5 0x0200
+
+ #define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
++#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+
+ /* ACL flags */
+ #define ACL_CONT 0x01
+@@ -176,6 +181,9 @@ enum {
+ #define LMP_EV5 0x02
+
+ #define LMP_SNIFF_SUBR 0x02
++#define LMP_EDR_ESCO_2M 0x20
++#define LMP_EDR_ESCO_3M 0x40
++#define LMP_EDR_3S_ESCO 0x80
+
+ #define LMP_SIMPLE_PAIR 0x08
+
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 46a43b7..01f9316 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -169,6 +169,7 @@ struct hci_conn {
+ __u16 link_policy;
+ __u32 link_mode;
+ __u8 auth_type;
++ __u8 sec_level;
+ __u8 power_save;
+ unsigned long pend;
+
+@@ -325,12 +326,11 @@ int hci_conn_del(struct hci_conn *conn);
+ void hci_conn_hash_flush(struct hci_dev *hdev);
+ void hci_conn_check_pending(struct hci_dev *hdev);
+
+-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type);
++struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
+ int hci_conn_check_link_mode(struct hci_conn *conn);
+-int hci_conn_auth(struct hci_conn *conn);
+-int hci_conn_encrypt(struct hci_conn *conn);
++int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
+ int hci_conn_change_link_key(struct hci_conn *conn);
+-int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
++int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
+
+ void hci_conn_enter_active_mode(struct hci_conn *conn);
+ void hci_conn_enter_sniff_mode(struct hci_conn *conn);
+@@ -470,26 +470,26 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+
+ /* ----- HCI protocols ----- */
+ struct hci_proto {
+- char *name;
++ char *name;
+ unsigned int id;
+ unsigned long flags;
+
+ void *priv;
+
+- int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
++ int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
+ int (*connect_cfm) (struct hci_conn *conn, __u8 status);
+- int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
++ int (*disconn_ind) (struct hci_conn *conn);
++ int (*disconn_cfm) (struct hci_conn *conn, __u8 reason);
+ int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+ int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
+- int (*auth_cfm) (struct hci_conn *conn, __u8 status);
+- int (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
++ int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
+ };
+
+ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+ {
+ register struct hci_proto *hp;
+ int mask = 0;
+-
++
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->connect_ind)
+ mask |= hp->connect_ind(hdev, bdaddr, type);
+@@ -514,30 +514,52 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
+ hp->connect_cfm(conn, status);
+ }
+
+-static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
++static inline int hci_proto_disconn_ind(struct hci_conn *conn)
+ {
+ register struct hci_proto *hp;
++ int reason = 0x13;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->disconn_ind)
+- hp->disconn_ind(conn, reason);
++ reason = hp->disconn_ind(conn);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->disconn_ind)
+- hp->disconn_ind(conn, reason);
++ reason = hp->disconn_ind(conn);
++
++ return reason;
++}
++
++static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
++{
++ register struct hci_proto *hp;
++
++ hp = hci_proto[HCI_PROTO_L2CAP];
++ if (hp && hp->disconn_cfm)
++ hp->disconn_cfm(conn, reason);
++
++ hp = hci_proto[HCI_PROTO_SCO];
++ if (hp && hp->disconn_cfm)
++ hp->disconn_cfm(conn, reason);
+ }
+
+ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
+ {
+ register struct hci_proto *hp;
++ __u8 encrypt;
++
++ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
++ return;
++
++ encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+- if (hp && hp->auth_cfm)
+- hp->auth_cfm(conn, status);
++ if (hp && hp->security_cfm)
++ hp->security_cfm(conn, status, encrypt);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+- if (hp && hp->auth_cfm)
+- hp->auth_cfm(conn, status);
++ if (hp && hp->security_cfm)
++ hp->security_cfm(conn, status, encrypt);
+ }
+
+ static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+@@ -545,12 +567,12 @@ static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u
+ register struct hci_proto *hp;
+
+ hp = hci_proto[HCI_PROTO_L2CAP];
+- if (hp && hp->encrypt_cfm)
+- hp->encrypt_cfm(conn, status, encrypt);
++ if (hp && hp->security_cfm)
++ hp->security_cfm(conn, status, encrypt);
+
+ hp = hci_proto[HCI_PROTO_SCO];
+- if (hp && hp->encrypt_cfm)
+- hp->encrypt_cfm(conn, status, encrypt);
++ if (hp && hp->security_cfm)
++ hp->security_cfm(conn, status, encrypt);
+ }
+
+ int hci_register_proto(struct hci_proto *hproto);
+@@ -562,8 +584,7 @@ struct hci_cb {
+
+ char *name;
+
+- void (*auth_cfm) (struct hci_conn *conn, __u8 status);
+- void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
++ void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+ };
+@@ -571,14 +592,20 @@ struct hci_cb {
+ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+ {
+ struct list_head *p;
++ __u8 encrypt;
+
+ hci_proto_auth_cfm(conn, status);
+
++ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
++ return;
++
++ encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
++
+ read_lock_bh(&hci_cb_list_lock);
+ list_for_each(p, &hci_cb_list) {
+ struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+- if (cb->auth_cfm)
+- cb->auth_cfm(conn, status);
++ if (cb->security_cfm)
++ cb->security_cfm(conn, status, encrypt);
+ }
+ read_unlock_bh(&hci_cb_list_lock);
+ }
+@@ -587,13 +614,16 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encr
+ {
+ struct list_head *p;
+
++ if (conn->sec_level == BT_SECURITY_SDP)
++ conn->sec_level = BT_SECURITY_LOW;
++
+ hci_proto_encrypt_cfm(conn, status, encrypt);
+
+ read_lock_bh(&hci_cb_list_lock);
+ list_for_each(p, &hci_cb_list) {
+ struct hci_cb *cb = list_entry(p, struct hci_cb, list);
+- if (cb->encrypt_cfm)
+- cb->encrypt_cfm(conn, status, encrypt);
++ if (cb->security_cfm)
++ cb->security_cfm(conn, status, encrypt);
+ }
+ read_unlock_bh(&hci_cb_list_lock);
+ }
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index 73e115b..f566aa1 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -37,6 +37,7 @@ struct sockaddr_l2 {
+ sa_family_t l2_family;
+ __le16 l2_psm;
+ bdaddr_t l2_bdaddr;
++ __le16 l2_cid;
+ };
+
+ /* L2CAP socket options */
+@@ -185,6 +186,7 @@ struct l2cap_info_rsp {
+ /* info type */
+ #define L2CAP_IT_CL_MTU 0x0001
+ #define L2CAP_IT_FEAT_MASK 0x0002
++#define L2CAP_IT_FIXED_CHAN 0x0003
+
+ /* info result */
+ #define L2CAP_IR_SUCCESS 0x0000
+@@ -219,11 +221,14 @@ struct l2cap_conn {
+ __u8 rx_ident;
+ __u8 tx_ident;
+
++ __u8 disc_reason;
++
+ struct l2cap_chan_list chan_list;
+ };
+
+ #define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
+-#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x02
++#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
++#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
+
+ /* ----- L2CAP channel and socket info ----- */
+ #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+@@ -237,8 +242,9 @@ struct l2cap_pinfo {
+ __u16 imtu;
+ __u16 omtu;
+ __u16 flush_to;
+-
+- __u32 link_mode;
++ __u8 sec_level;
++ __u8 role_switch;
++ __u8 force_reliable;
+
+ __u8 conf_req[64];
+ __u8 conf_len;
+@@ -257,6 +263,7 @@ struct l2cap_pinfo {
+ #define L2CAP_CONF_REQ_SENT 0x01
+ #define L2CAP_CONF_INPUT_DONE 0x02
+ #define L2CAP_CONF_OUTPUT_DONE 0x04
++#define L2CAP_CONF_CONNECT_PEND 0x80
+
+ #define L2CAP_CONF_MAX_RETRIES 2
+
+diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
+index 4dc8d92..8007261 100644
+--- a/include/net/bluetooth/rfcomm.h
++++ b/include/net/bluetooth/rfcomm.h
+@@ -183,8 +183,9 @@ struct rfcomm_dlc {
+ u8 remote_v24_sig;
+ u8 mscex;
+ u8 out;
+-
+- u32 link_mode;
++ u8 sec_level;
++ u8 role_switch;
++ u32 defer_setup;
+
+ uint mtu;
+ uint cfc;
+@@ -202,10 +203,12 @@ struct rfcomm_dlc {
+ #define RFCOMM_RX_THROTTLED 0
+ #define RFCOMM_TX_THROTTLED 1
+ #define RFCOMM_TIMED_OUT 2
+-#define RFCOMM_MSC_PENDING 3
+-#define RFCOMM_AUTH_PENDING 4
+-#define RFCOMM_AUTH_ACCEPT 5
+-#define RFCOMM_AUTH_REJECT 6
++#define RFCOMM_MSC_PENDING 3
++#define RFCOMM_SEC_PENDING 4
++#define RFCOMM_AUTH_PENDING 5
++#define RFCOMM_AUTH_ACCEPT 6
++#define RFCOMM_AUTH_REJECT 7
++#define RFCOMM_DEFER_SETUP 8
+
+ /* Scheduling flags and events */
+ #define RFCOMM_SCHED_STATE 0
+@@ -239,6 +242,7 @@ int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
+ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
+ int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
+ int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig);
++void rfcomm_dlc_accept(struct rfcomm_dlc *d);
+
+ #define rfcomm_dlc_lock(d) spin_lock(&d->lock)
+ #define rfcomm_dlc_unlock(d) spin_unlock(&d->lock)
+@@ -304,7 +308,8 @@ struct rfcomm_pinfo {
+ struct bt_sock bt;
+ struct rfcomm_dlc *dlc;
+ u8 channel;
+- u32 link_mode;
++ u8 sec_level;
++ u8 role_switch;
+ };
+
+ int rfcomm_init_sockets(void);
+@@ -333,7 +338,6 @@ struct rfcomm_dev_req {
+ bdaddr_t src;
+ bdaddr_t dst;
+ u8 channel;
+-
+ };
+
+ struct rfcomm_dev_info {
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 744ed3f..02b9baa 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -41,14 +41,13 @@
+
+ #include <net/bluetooth/bluetooth.h>
+
+-#define VERSION "2.14"
++#define VERSION "2.15"
+
+ /* Bluetooth sockets */
+ #define BT_MAX_PROTO 8
+ static struct net_proto_family *bt_proto[BT_MAX_PROTO];
+ static DEFINE_RWLOCK(bt_proto_lock);
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
+ static const char *bt_key_strings[BT_MAX_PROTO] = {
+ "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
+@@ -86,11 +85,6 @@ static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
+ bt_slock_key_strings[proto], &bt_slock_key[proto],
+ bt_key_strings[proto], &bt_lock_key[proto]);
+ }
+-#else
+-static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
+-{
+-}
+-#endif
+
+ int bt_sock_register(int proto, struct net_proto_family *ops)
+ {
+@@ -217,7 +211,8 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
+ continue;
+ }
+
+- if (sk->sk_state == BT_CONNECTED || !newsock) {
++ if (sk->sk_state == BT_CONNECTED || !newsock ||
++ bt_sk(parent)->defer_setup) {
+ bt_accept_unlink(sk);
+ if (newsock)
+ sock_graft(sk, newsock);
+@@ -232,7 +227,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
+ EXPORT_SYMBOL(bt_accept_dequeue);
+
+ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+- struct msghdr *msg, size_t len, int flags)
++ struct msghdr *msg, size_t len, int flags)
+ {
+ int noblock = flags & MSG_DONTWAIT;
+ struct sock *sk = sock->sk;
+@@ -277,7 +272,9 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
+
+ list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
+ sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
+- if (sk->sk_state == BT_CONNECTED)
++ if (sk->sk_state == BT_CONNECTED ||
++ (bt_sk(parent)->defer_setup &&
++ sk->sk_state == BT_CONNECT2))
+ return POLLIN | POLLRDNORM;
+ }
+
+diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
+index c9cac77..0073ec8 100644
+--- a/net/bluetooth/cmtp/core.c
++++ b/net/bluetooth/cmtp/core.c
+@@ -126,8 +126,7 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
+
+ session->reassembly[id] = nskb;
+
+- if (skb)
+- kfree_skb(skb);
++ kfree_skb(skb);
+ }
+
+ static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *skb)
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index a4a789f..1181db0 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -123,6 +123,8 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
+ conn->state = BT_CONNECT;
+ conn->out = 1;
+
++ conn->attempt++;
++
+ cp.handle = cpu_to_le16(handle);
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+
+@@ -139,6 +141,8 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
+ conn->state = BT_CONNECT;
+ conn->out = 1;
+
++ conn->attempt++;
++
+ cp.handle = cpu_to_le16(handle);
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+
+@@ -155,6 +159,7 @@ static void hci_conn_timeout(unsigned long arg)
+ {
+ struct hci_conn *conn = (void *) arg;
+ struct hci_dev *hdev = conn->hdev;
++ __u8 reason;
+
+ BT_DBG("conn %p state %d", conn, conn->state);
+
+@@ -173,7 +178,8 @@ static void hci_conn_timeout(unsigned long arg)
+ break;
+ case BT_CONFIG:
+ case BT_CONNECTED:
+- hci_acl_disconn(conn, 0x13);
++ reason = hci_proto_disconn_ind(conn);
++ hci_acl_disconn(conn, reason);
+ break;
+ default:
+ conn->state = BT_CLOSED;
+@@ -216,12 +222,13 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+ break;
+ case SCO_LINK:
+ if (lmp_esco_capable(hdev))
+- conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK;
++ conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
++ (hdev->esco_type & EDR_ESCO_MASK);
+ else
+ conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
+ break;
+ case ESCO_LINK:
+- conn->pkt_type = hdev->esco_type;
++ conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+ break;
+ }
+
+@@ -280,6 +287,8 @@ int hci_conn_del(struct hci_conn *conn)
+
+ skb_queue_purge(&conn->data_q);
+
++ hci_conn_del_sysfs(conn);
++
+ return 0;
+ }
+
+@@ -325,7 +334,7 @@ EXPORT_SYMBOL(hci_get_route);
+
+ /* Create SCO or ACL connection.
+ * Device _must_ be locked */
+-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type)
++struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+ {
+ struct hci_conn *acl;
+ struct hci_conn *sco;
+@@ -340,6 +349,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
+ hci_conn_hold(acl);
+
+ if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
++ acl->sec_level = sec_level;
+ acl->auth_type = auth_type;
+ hci_acl_connect(acl);
+ }
+@@ -385,51 +395,59 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
+ EXPORT_SYMBOL(hci_conn_check_link_mode);
+
+ /* Authenticate remote device */
+-int hci_conn_auth(struct hci_conn *conn)
++static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ {
+ BT_DBG("conn %p", conn);
+
+- if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) {
+- if (!(conn->auth_type & 0x01)) {
+- conn->auth_type |= 0x01;
+- conn->link_mode &= ~HCI_LM_AUTH;
+- }
+- }
+-
+- if (conn->link_mode & HCI_LM_AUTH)
++ if (sec_level > conn->sec_level)
++ conn->sec_level = sec_level;
++ else if (conn->link_mode & HCI_LM_AUTH)
+ return 1;
+
++ conn->auth_type = auth_type;
++
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ struct hci_cp_auth_requested cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
+ }
++
+ return 0;
+ }
+-EXPORT_SYMBOL(hci_conn_auth);
+
+-/* Enable encryption */
+-int hci_conn_encrypt(struct hci_conn *conn)
++/* Enable security */
++int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ {
+ BT_DBG("conn %p", conn);
+
++ if (sec_level == BT_SECURITY_SDP)
++ return 1;
++
++ if (sec_level == BT_SECURITY_LOW) {
++ if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0)
++ return hci_conn_auth(conn, sec_level, auth_type);
++ else
++ return 1;
++ }
++
+ if (conn->link_mode & HCI_LM_ENCRYPT)
+- return hci_conn_auth(conn);
++ return hci_conn_auth(conn, sec_level, auth_type);
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ return 0;
+
+- if (hci_conn_auth(conn)) {
++ if (hci_conn_auth(conn, sec_level, auth_type)) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.encrypt = 1;
+ hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ }
++
+ return 0;
+ }
+-EXPORT_SYMBOL(hci_conn_encrypt);
++EXPORT_SYMBOL(hci_conn_security);
+
+ /* Change link key */
+ int hci_conn_change_link_key(struct hci_conn *conn)
+@@ -442,12 +460,13 @@ int hci_conn_change_link_key(struct hci_conn *conn)
+ hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
+ sizeof(cp), &cp);
+ }
++
+ return 0;
+ }
+ EXPORT_SYMBOL(hci_conn_change_link_key);
+
+ /* Switch role */
+-int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
++int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
+ {
+ BT_DBG("conn %p", conn);
+
+@@ -460,6 +479,7 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
+ cp.role = role;
+ hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
+ }
++
+ return 0;
+ }
+ EXPORT_SYMBOL(hci_conn_switch_role);
+@@ -542,9 +562,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
+
+ c->state = BT_CLOSED;
+
+- hci_conn_del_sysfs(c);
+-
+- hci_proto_disconn_ind(c, 0x16);
++ hci_proto_disconn_cfm(c, 0x16);
+ hci_conn_del(c);
+ }
+ }
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index ba78cc1..cd06151 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1565,8 +1565,7 @@ static void hci_cmd_task(unsigned long arg)
+
+ /* Send queued commands */
+ if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
+- if (hdev->sent_cmd)
+- kfree_skb(hdev->sent_cmd);
++ kfree_skb(hdev->sent_cmd);
+
+ if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
+ atomic_dec(&hdev->cmd_cnt);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index f91ba69..5553424 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -484,6 +484,15 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
+ if (hdev->features[4] & LMP_EV5)
+ hdev->esco_type |= (ESCO_EV5);
+
++ if (hdev->features[5] & LMP_EDR_ESCO_2M)
++ hdev->esco_type |= (ESCO_2EV3);
++
++ if (hdev->features[5] & LMP_EDR_ESCO_3M)
++ hdev->esco_type |= (ESCO_3EV3);
++
++ if (hdev->features[5] & LMP_EDR_3S_ESCO)
++ hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
++
+ BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
+ hdev->features[0], hdev->features[1],
+ hdev->features[2], hdev->features[3],
+@@ -914,7 +923,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
+ if (ev->status) {
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_del(conn);
+- }
++ } else if (ev->link_type != ACL_LINK)
++ hci_proto_connect_cfm(conn, ev->status);
+
+ unlock:
+ hci_dev_unlock(hdev);
+@@ -1009,9 +1019,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
+ if (conn) {
+ conn->state = BT_CLOSED;
+
+- hci_conn_del_sysfs(conn);
+-
+- hci_proto_disconn_ind(conn, ev->reason);
++ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+ }
+
+@@ -1600,7 +1608,8 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status && hdev->ssp_mode > 0 &&
+- conn->ssp_mode > 0 && conn->out) {
++ conn->ssp_mode > 0 && conn->out &&
++ conn->sec_level != BT_SECURITY_SDP) {
+ struct hci_cp_auth_requested cp;
+ cp.handle = ev->handle;
+ hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+@@ -1637,6 +1646,13 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
+ conn->type = SCO_LINK;
+ }
+
++ if (conn->out && ev->status == 0x1c && conn->attempt < 2) {
++ conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
++ (hdev->esco_type & EDR_ESCO_MASK);
++ hci_setup_sync(conn, conn->link->handle);
++ goto unlock;
++ }
++
+ if (!ev->status) {
+ conn->handle = __le16_to_cpu(ev->handle);
+ conn->state = BT_CONNECTED;
+diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
+index b93748e..ca4d3b4 100644
+--- a/net/bluetooth/l2cap.c
++++ b/net/bluetooth/l2cap.c
+@@ -50,9 +50,10 @@
+ #include <net/bluetooth/hci_core.h>
+ #include <net/bluetooth/l2cap.h>
+
+-#define VERSION "2.11"
++#define VERSION "2.13"
+
+-static u32 l2cap_feat_mask = 0x0000;
++static u32 l2cap_feat_mask = 0x0080;
++static u8 l2cap_fixed_chan[8] = { 0x02, };
+
+ static const struct proto_ops l2cap_sock_ops;
+
+@@ -77,9 +78,10 @@ static void l2cap_sock_timeout(unsigned long arg)
+
+ bh_lock_sock(sk);
+
+- if (sk->sk_state == BT_CONNECT &&
+- (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
+- L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
++ if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
++ reason = ECONNREFUSED;
++ else if (sk->sk_state == BT_CONNECT &&
++ l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+ reason = ECONNREFUSED;
+ else
+ reason = ETIMEDOUT;
+@@ -204,6 +206,8 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
+
+ BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
+
++ conn->disc_reason = 0x13;
++
+ l2cap_pi(sk)->conn = conn;
+
+ if (sk->sk_type == SOCK_SEQPACKET) {
+@@ -259,18 +263,35 @@ static void l2cap_chan_del(struct sock *sk, int err)
+ }
+
+ /* Service level security */
+-static inline int l2cap_check_link_mode(struct sock *sk)
++static inline int l2cap_check_security(struct sock *sk)
+ {
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
++ __u8 auth_type;
+
+- if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
+- (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
+- return hci_conn_encrypt(conn->hcon);
++ if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
++ if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
++ auth_type = HCI_AT_NO_BONDING_MITM;
++ else
++ auth_type = HCI_AT_NO_BONDING;
+
+- if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
+- return hci_conn_auth(conn->hcon);
++ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
++ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
++ } else {
++ switch (l2cap_pi(sk)->sec_level) {
++ case BT_SECURITY_HIGH:
++ auth_type = HCI_AT_GENERAL_BONDING_MITM;
++ break;
++ case BT_SECURITY_MEDIUM:
++ auth_type = HCI_AT_GENERAL_BONDING;
++ break;
++ default:
++ auth_type = HCI_AT_NO_BONDING;
++ break;
++ }
++ }
+
+- return 1;
++ return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
++ auth_type);
+ }
+
+ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
+@@ -312,7 +333,10 @@ static void l2cap_do_start(struct sock *sk)
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
+- if (l2cap_check_link_mode(sk)) {
++ if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
++ return;
++
++ if (l2cap_check_security(sk)) {
+ struct l2cap_conn_req req;
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+ req.psm = l2cap_pi(sk)->psm;
+@@ -356,7 +380,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ }
+
+ if (sk->sk_state == BT_CONNECT) {
+- if (l2cap_check_link_mode(sk)) {
++ if (l2cap_check_security(sk)) {
+ struct l2cap_conn_req req;
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+ req.psm = l2cap_pi(sk)->psm;
+@@ -371,10 +395,18 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+
+- if (l2cap_check_link_mode(sk)) {
+- sk->sk_state = BT_CONFIG;
+- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
++ if (l2cap_check_security(sk)) {
++ if (bt_sk(sk)->defer_setup) {
++ struct sock *parent = bt_sk(sk)->parent;
++ rsp.result = cpu_to_le16(L2CAP_CR_PEND);
++ rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
++ parent->sk_data_ready(parent, 0);
++
++ } else {
++ sk->sk_state = BT_CONFIG;
++ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
++ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
++ }
+ } else {
+ rsp.result = cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+@@ -426,7 +458,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
+ read_lock(&l->lock);
+
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+- if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
++ if (l2cap_pi(sk)->force_reliable)
+ sk->sk_err = err;
+ }
+
+@@ -437,6 +469,7 @@ static void l2cap_info_timeout(unsigned long arg)
+ {
+ struct l2cap_conn *conn = (void *) arg;
+
++ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+@@ -470,6 +503,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
+ spin_lock_init(&conn->lock);
+ rwlock_init(&conn->chan_list.lock);
+
++ conn->disc_reason = 0x13;
++
+ return conn;
+ }
+
+@@ -483,8 +518,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
+
+ BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+- if (conn->rx_skb)
+- kfree_skb(conn->rx_skb);
++ kfree_skb(conn->rx_skb);
+
+ /* Kill channels */
+ while ((sk = conn->chan_list.head)) {
+@@ -608,7 +642,6 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
+
+ case BT_CONNECTED:
+ case BT_CONFIG:
+- case BT_CONNECT2:
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ struct l2cap_disconn_req req;
+@@ -624,6 +657,27 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
+ l2cap_chan_del(sk, reason);
+ break;
+
++ case BT_CONNECT2:
++ if (sk->sk_type == SOCK_SEQPACKET) {
++ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
++ struct l2cap_conn_rsp rsp;
++ __u16 result;
++
++ if (bt_sk(sk)->defer_setup)
++ result = L2CAP_CR_SEC_BLOCK;
++ else
++ result = L2CAP_CR_BAD_PSM;
++
++ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
++ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
++ rsp.result = cpu_to_le16(result);
++ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
++ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
++ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
++ } else
++ l2cap_chan_del(sk, reason);
++ break;
++
+ case BT_CONNECT:
+ case BT_DISCONN:
+ l2cap_chan_del(sk, reason);
+@@ -653,13 +707,19 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
+
+ if (parent) {
+ sk->sk_type = parent->sk_type;
++ bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
++
+ pi->imtu = l2cap_pi(parent)->imtu;
+ pi->omtu = l2cap_pi(parent)->omtu;
+- pi->link_mode = l2cap_pi(parent)->link_mode;
++ pi->sec_level = l2cap_pi(parent)->sec_level;
++ pi->role_switch = l2cap_pi(parent)->role_switch;
++ pi->force_reliable = l2cap_pi(parent)->force_reliable;
+ } else {
+ pi->imtu = L2CAP_DEFAULT_MTU;
+ pi->omtu = 0;
+- pi->link_mode = 0;
++ pi->sec_level = BT_SECURITY_LOW;
++ pi->role_switch = 0;
++ pi->force_reliable = 0;
+ }
+
+ /* Default config options */
+@@ -723,17 +783,24 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
+ return 0;
+ }
+
+-static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
++static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ {
+- struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
+ struct sock *sk = sock->sk;
+- int err = 0;
++ struct sockaddr_l2 la;
++ int len, err = 0;
+
+- BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
++ BT_DBG("sk %p", sk);
+
+ if (!addr || addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
++ memset(&la, 0, sizeof(la));
++ len = min_t(unsigned int, sizeof(la), alen);
++ memcpy(&la, addr, len);
++
++ if (la.l2_cid)
++ return -EINVAL;
++
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_OPEN) {
+@@ -741,7 +808,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
+ goto done;
+ }
+
+- if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
++ if (la.l2_psm && btohs(la.l2_psm) < 0x1001 &&
+ !capable(CAP_NET_BIND_SERVICE)) {
+ err = -EACCES;
+ goto done;
+@@ -749,14 +816,17 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
+
+ write_lock_bh(&l2cap_sk_list.lock);
+
+- if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
++ if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
+ err = -EADDRINUSE;
+ } else {
+ /* Save source address */
+- bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
+- l2cap_pi(sk)->psm = la->l2_psm;
+- l2cap_pi(sk)->sport = la->l2_psm;
++ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
++ l2cap_pi(sk)->psm = la.l2_psm;
++ l2cap_pi(sk)->sport = la.l2_psm;
+ sk->sk_state = BT_BOUND;
++
++ if (btohs(la.l2_psm) == 0x0001 || btohs(la.l2_psm) == 0x0003)
++ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ }
+
+ write_unlock_bh(&l2cap_sk_list.lock);
+@@ -776,7 +846,8 @@ static int l2cap_do_connect(struct sock *sk)
+ __u8 auth_type;
+ int err = 0;
+
+- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
++ BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
++ l2cap_pi(sk)->psm);
+
+ if (!(hdev = hci_get_route(dst, src)))
+ return -EHOSTUNREACH;
+@@ -785,21 +856,42 @@ static int l2cap_do_connect(struct sock *sk)
+
+ err = -ENOMEM;
+
+- if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
+- l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
+- l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
+- if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
++ if (sk->sk_type == SOCK_RAW) {
++ switch (l2cap_pi(sk)->sec_level) {
++ case BT_SECURITY_HIGH:
++ auth_type = HCI_AT_DEDICATED_BONDING_MITM;
++ break;
++ case BT_SECURITY_MEDIUM:
++ auth_type = HCI_AT_DEDICATED_BONDING;
++ break;
++ default:
++ auth_type = HCI_AT_NO_BONDING;
++ break;
++ }
++ } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
++ if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+ auth_type = HCI_AT_NO_BONDING_MITM;
+ else
+- auth_type = HCI_AT_GENERAL_BONDING_MITM;
+- } else {
+- if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
+ auth_type = HCI_AT_NO_BONDING;
+- else
++
++ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
++ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
++ } else {
++ switch (l2cap_pi(sk)->sec_level) {
++ case BT_SECURITY_HIGH:
++ auth_type = HCI_AT_GENERAL_BONDING_MITM;
++ break;
++ case BT_SECURITY_MEDIUM:
+ auth_type = HCI_AT_GENERAL_BONDING;
++ break;
++ default:
++ auth_type = HCI_AT_NO_BONDING;
++ break;
++ }
+ }
+
+- hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
++ hcon = hci_connect(hdev, ACL_LINK, dst,
++ l2cap_pi(sk)->sec_level, auth_type);
+ if (!hcon)
+ goto done;
+
+@@ -835,20 +927,25 @@ done:
+
+ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+ {
+- struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
+ struct sock *sk = sock->sk;
+- int err = 0;
+-
+- lock_sock(sk);
++ struct sockaddr_l2 la;
++ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+- if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
+- err = -EINVAL;
+- goto done;
+- }
++ if (!addr || addr->sa_family != AF_BLUETOOTH)
++ return -EINVAL;
++
++ memset(&la, 0, sizeof(la));
++ len = min_t(unsigned int, sizeof(la), alen);
++ memcpy(&la, addr, len);
++
++ if (la.l2_cid)
++ return -EINVAL;
++
++ lock_sock(sk);
+
+- if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
++ if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
+ err = -EINVAL;
+ goto done;
+ }
+@@ -875,8 +972,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
+ }
+
+ /* Set destination address and psm */
+- bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
+- l2cap_pi(sk)->psm = la->l2_psm;
++ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
++ l2cap_pi(sk)->psm = la.l2_psm;
+
+ if ((err = l2cap_do_connect(sk)))
+ goto done;
+@@ -1000,12 +1097,16 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
+ addr->sa_family = AF_BLUETOOTH;
+ *len = sizeof(struct sockaddr_l2);
+
+- if (peer)
++ if (peer) {
++ la->l2_psm = l2cap_pi(sk)->psm;
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+- else
++ la->l2_cid = htobs(l2cap_pi(sk)->dcid);
++ } else {
++ la->l2_psm = l2cap_pi(sk)->sport;
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
++ la->l2_cid = htobs(l2cap_pi(sk)->scid);
++ }
+
+- la->l2_psm = l2cap_pi(sk)->psm;
+ return 0;
+ }
+
+@@ -1106,11 +1207,38 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
+ return err;
+ }
+
+-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
++static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
++{
++ struct sock *sk = sock->sk;
++
++ lock_sock(sk);
++
++ if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
++ struct l2cap_conn_rsp rsp;
++
++ sk->sk_state = BT_CONFIG;
++
++ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
++ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
++ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
++ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
++ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
++ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
++
++ release_sock(sk);
++ return 0;
++ }
++
++ release_sock(sk);
++
++ return bt_sock_recvmsg(iocb, sock, msg, len, flags);
++}
++
++static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
+ {
+ struct sock *sk = sock->sk;
+ struct l2cap_options opts;
+- int err = 0, len;
++ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -1140,7 +1268,15 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+ break;
+ }
+
+- l2cap_pi(sk)->link_mode = opt;
++ if (opt & L2CAP_LM_AUTH)
++ l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
++ if (opt & L2CAP_LM_ENCRYPT)
++ l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
++ if (opt & L2CAP_LM_SECURE)
++ l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
++
++ l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
++ l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ break;
+
+ default:
+@@ -1152,12 +1288,77 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+ return err;
+ }
+
+-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
++static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
++{
++ struct sock *sk = sock->sk;
++ struct bt_security sec;
++ int len, err = 0;
++ u32 opt;
++
++ BT_DBG("sk %p", sk);
++
++ if (level == SOL_L2CAP)
++ return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
++
++ if (level != SOL_BLUETOOTH)
++ return -ENOPROTOOPT;
++
++ lock_sock(sk);
++
++ switch (optname) {
++ case BT_SECURITY:
++ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
++ err = -EINVAL;
++ break;
++ }
++
++ sec.level = BT_SECURITY_LOW;
++
++ len = min_t(unsigned int, sizeof(sec), optlen);
++ if (copy_from_user((char *) &sec, optval, len)) {
++ err = -EFAULT;
++ break;
++ }
++
++ if (sec.level < BT_SECURITY_LOW ||
++ sec.level > BT_SECURITY_HIGH) {
++ err = -EINVAL;
++ break;
++ }
++
++ l2cap_pi(sk)->sec_level = sec.level;
++ break;
++
++ case BT_DEFER_SETUP:
++ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
++ err = -EINVAL;
++ break;
++ }
++
++ if (get_user(opt, (u32 __user *) optval)) {
++ err = -EFAULT;
++ break;
++ }
++
++ bt_sk(sk)->defer_setup = opt;
++ break;
++
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++
++static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+ {
+ struct sock *sk = sock->sk;
+ struct l2cap_options opts;
+ struct l2cap_conninfo cinfo;
+ int len, err = 0;
++ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+@@ -1180,12 +1381,36 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
+ break;
+
+ case L2CAP_LM:
+- if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
++ switch (l2cap_pi(sk)->sec_level) {
++ case BT_SECURITY_LOW:
++ opt = L2CAP_LM_AUTH;
++ break;
++ case BT_SECURITY_MEDIUM:
++ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
++ break;
++ case BT_SECURITY_HIGH:
++ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
++ L2CAP_LM_SECURE;
++ break;
++ default:
++ opt = 0;
++ break;
++ }
++
++ if (l2cap_pi(sk)->role_switch)
++ opt |= L2CAP_LM_MASTER;
++
++ if (l2cap_pi(sk)->force_reliable)
++ opt |= L2CAP_LM_RELIABLE;
++
++ if (put_user(opt, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ case L2CAP_CONNINFO:
+- if (sk->sk_state != BT_CONNECTED) {
++ if (sk->sk_state != BT_CONNECTED &&
++ !(sk->sk_state == BT_CONNECT2 &&
++ bt_sk(sk)->defer_setup)) {
+ err = -ENOTCONN;
+ break;
+ }
+@@ -1208,6 +1433,60 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
+ return err;
+ }
+
++static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
++{
++ struct sock *sk = sock->sk;
++ struct bt_security sec;
++ int len, err = 0;
++
++ BT_DBG("sk %p", sk);
++
++ if (level == SOL_L2CAP)
++ return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
++
++ if (level != SOL_BLUETOOTH)
++ return -ENOPROTOOPT;
++
++ if (get_user(len, optlen))
++ return -EFAULT;
++
++ lock_sock(sk);
++
++ switch (optname) {
++ case BT_SECURITY:
++ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
++ err = -EINVAL;
++ break;
++ }
++
++ sec.level = l2cap_pi(sk)->sec_level;
++
++ len = min_t(unsigned int, len, sizeof(sec));
++ if (copy_to_user(optval, (char *) &sec, len))
++ err = -EFAULT;
++
++ break;
++
++ case BT_DEFER_SETUP:
++ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
++ err = -EINVAL;
++ break;
++ }
++
++ if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
++ err = -EFAULT;
++
++ break;
++
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++
+ static int l2cap_sock_shutdown(struct socket *sock, int how)
+ {
+ struct sock *sk = sock->sk;
+@@ -1270,11 +1549,6 @@ static void l2cap_chan_ready(struct sock *sk)
+ */
+ parent->sk_data_ready(parent, 0);
+ }
+-
+- if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
+- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+- hci_conn_change_link_key(conn->hcon);
+- }
+ }
+
+ /* Copy frame to all raw sockets on that connection */
+@@ -1549,8 +1823,11 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
+
+ if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
+ cmd->ident == conn->info_ident) {
+- conn->info_ident = 0;
+ del_timer(&conn->info_timer);
++
++ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
++ conn->info_ident = 0;
++
+ l2cap_conn_start(conn);
+ }
+
+@@ -1580,6 +1857,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ /* Check if the ACL is secure enough (if not SDP) */
+ if (psm != cpu_to_le16(0x0001) &&
+ !hci_conn_check_link_mode(conn->hcon)) {
++ conn->disc_reason = 0x05;
+ result = L2CAP_CR_SEC_BLOCK;
+ goto response;
+ }
+@@ -1621,11 +1899,18 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
+
+ l2cap_pi(sk)->ident = cmd->ident;
+
+- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
+- if (l2cap_check_link_mode(sk)) {
+- sk->sk_state = BT_CONFIG;
+- result = L2CAP_CR_SUCCESS;
+- status = L2CAP_CS_NO_INFO;
++ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
++ if (l2cap_check_security(sk)) {
++ if (bt_sk(sk)->defer_setup) {
++ sk->sk_state = BT_CONNECT2;
++ result = L2CAP_CR_PEND;
++ status = L2CAP_CS_AUTHOR_PEND;
++ parent->sk_data_ready(parent, 0);
++ } else {
++ sk->sk_state = BT_CONFIG;
++ result = L2CAP_CR_SUCCESS;
++ status = L2CAP_CS_NO_INFO;
++ }
+ } else {
+ sk->sk_state = BT_CONNECT2;
+ result = L2CAP_CR_PEND;
+@@ -1695,11 +1980,14 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ l2cap_pi(sk)->dcid = dcid;
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+
++ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
++
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, req), req);
+ break;
+
+ case L2CAP_CR_PEND:
++ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+ break;
+
+ default:
+@@ -1908,6 +2196,14 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
+ put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
+ l2cap_send_cmd(conn, cmd->ident,
+ L2CAP_INFO_RSP, sizeof(buf), buf);
++ } else if (type == L2CAP_IT_FIXED_CHAN) {
++ u8 buf[12];
++ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
++ rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
++ rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
++ memcpy(buf + 4, l2cap_fixed_chan, 8);
++ l2cap_send_cmd(conn, cmd->ident,
++ L2CAP_INFO_RSP, sizeof(buf), buf);
+ } else {
+ struct l2cap_info_rsp rsp;
+ rsp.type = cpu_to_le16(type);
+@@ -1929,14 +2225,31 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
+
+ BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
+
+- conn->info_ident = 0;
+-
+ del_timer(&conn->info_timer);
+
+- if (type == L2CAP_IT_FEAT_MASK)
++ if (type == L2CAP_IT_FEAT_MASK) {
+ conn->feat_mask = get_unaligned_le32(rsp->data);
+
+- l2cap_conn_start(conn);
++ if (conn->feat_mask & 0x0080) {
++ struct l2cap_info_req req;
++ req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
++
++ conn->info_ident = l2cap_get_ident(conn);
++
++ l2cap_send_cmd(conn, conn->info_ident,
++ L2CAP_INFO_REQ, sizeof(req), &req);
++ } else {
++ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
++ conn->info_ident = 0;
++
++ l2cap_conn_start(conn);
++ }
++ } else if (type == L2CAP_IT_FIXED_CHAN) {
++ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
++ conn->info_ident = 0;
++
++ l2cap_conn_start(conn);
++ }
+
+ return 0;
+ }
+@@ -2143,10 +2456,15 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+ continue;
+
+ if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
+- lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
++ lm1 |= HCI_LM_ACCEPT;
++ if (l2cap_pi(sk)->role_switch)
++ lm1 |= HCI_LM_MASTER;
+ exact++;
+- } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+- lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
++ } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
++ lm2 |= HCI_LM_ACCEPT;
++ if (l2cap_pi(sk)->role_switch)
++ lm2 |= HCI_LM_MASTER;
++ }
+ }
+ read_unlock(&l2cap_sk_list.lock);
+
+@@ -2172,89 +2490,48 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ return 0;
+ }
+
+-static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
++static int l2cap_disconn_ind(struct hci_conn *hcon)
+ {
+- BT_DBG("hcon %p reason %d", hcon, reason);
++ struct l2cap_conn *conn = hcon->l2cap_data;
+
+- if (hcon->type != ACL_LINK)
+- return 0;
++ BT_DBG("hcon %p", hcon);
+
+- l2cap_conn_del(hcon, bt_err(reason));
++ if (hcon->type != ACL_LINK || !conn)
++ return 0x13;
+
+- return 0;
++ return conn->disc_reason;
+ }
+
+-static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
++static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+ {
+- struct l2cap_chan_list *l;
+- struct l2cap_conn *conn = hcon->l2cap_data;
+- struct sock *sk;
++ BT_DBG("hcon %p reason %d", hcon, reason);
+
+- if (!conn)
++ if (hcon->type != ACL_LINK)
+ return 0;
+
+- l = &conn->chan_list;
+-
+- BT_DBG("conn %p", conn);
+-
+- read_lock(&l->lock);
+-
+- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+- struct l2cap_pinfo *pi = l2cap_pi(sk);
+-
+- bh_lock_sock(sk);
+-
+- if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
+- !(hcon->link_mode & HCI_LM_ENCRYPT) &&
+- !status) {
+- bh_unlock_sock(sk);
+- continue;
+- }
+-
+- if (sk->sk_state == BT_CONNECT) {
+- if (!status) {
+- struct l2cap_conn_req req;
+- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+- req.psm = l2cap_pi(sk)->psm;
+-
+- l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+-
+- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+- L2CAP_CONN_REQ, sizeof(req), &req);
+- } else {
+- l2cap_sock_clear_timer(sk);
+- l2cap_sock_set_timer(sk, HZ / 10);
+- }
+- } else if (sk->sk_state == BT_CONNECT2) {
+- struct l2cap_conn_rsp rsp;
+- __u16 result;
++ l2cap_conn_del(hcon, bt_err(reason));
+
+- if (!status) {
+- sk->sk_state = BT_CONFIG;
+- result = L2CAP_CR_SUCCESS;
+- } else {
+- sk->sk_state = BT_DISCONN;
+- l2cap_sock_set_timer(sk, HZ / 10);
+- result = L2CAP_CR_SEC_BLOCK;
+- }
++ return 0;
++}
+
+- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+- rsp.result = cpu_to_le16(result);
+- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+- }
++static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
++{
++ if (sk->sk_type != SOCK_SEQPACKET)
++ return;
+
+- bh_unlock_sock(sk);
++ if (encrypt == 0x00) {
++ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
++ l2cap_sock_clear_timer(sk);
++ l2cap_sock_set_timer(sk, HZ * 5);
++ } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
++ __l2cap_sock_close(sk, ECONNREFUSED);
++ } else {
++ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
++ l2cap_sock_clear_timer(sk);
+ }
+-
+- read_unlock(&l->lock);
+-
+- return 0;
+ }
+
+-static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
++static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ {
+ struct l2cap_chan_list *l;
+ struct l2cap_conn *conn = hcon->l2cap_data;
+@@ -2270,15 +2547,16 @@ static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ read_lock(&l->lock);
+
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+- struct l2cap_pinfo *pi = l2cap_pi(sk);
+-
+ bh_lock_sock(sk);
+
+- if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
+- (sk->sk_state == BT_CONNECTED ||
+- sk->sk_state == BT_CONFIG) &&
+- !status && encrypt == 0x00) {
+- __l2cap_sock_close(sk, ECONNREFUSED);
++ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
++ bh_unlock_sock(sk);
++ continue;
++ }
++
++ if (!status && (sk->sk_state == BT_CONNECTED ||
++ sk->sk_state == BT_CONFIG)) {
++ l2cap_check_encryption(sk, encrypt);
+ bh_unlock_sock(sk);
+ continue;
+ }
+@@ -2376,7 +2654,7 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
+ goto drop;
+
+ skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+- skb->len);
++ skb->len);
+ conn->rx_len = len - skb->len;
+ } else {
+ BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
+@@ -2398,7 +2676,7 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
+ }
+
+ skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+- skb->len);
++ skb->len);
+ conn->rx_len -= skb->len;
+
+ if (!conn->rx_len) {
+@@ -2424,10 +2702,10 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+- str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
++ str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
+ batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
+ sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
+- pi->imtu, pi->omtu, pi->link_mode);
++ pi->imtu, pi->omtu, pi->sec_level);
+ }
+
+ read_unlock_bh(&l2cap_sk_list.lock);
+@@ -2447,7 +2725,7 @@ static const struct proto_ops l2cap_sock_ops = {
+ .accept = l2cap_sock_accept,
+ .getname = l2cap_sock_getname,
+ .sendmsg = l2cap_sock_sendmsg,
+- .recvmsg = bt_sock_recvmsg,
++ .recvmsg = l2cap_sock_recvmsg,
+ .poll = bt_sock_poll,
+ .ioctl = bt_sock_ioctl,
+ .mmap = sock_no_mmap,
+@@ -2469,8 +2747,8 @@ static struct hci_proto l2cap_hci_proto = {
+ .connect_ind = l2cap_connect_ind,
+ .connect_cfm = l2cap_connect_cfm,
+ .disconn_ind = l2cap_disconn_ind,
+- .auth_cfm = l2cap_auth_cfm,
+- .encrypt_cfm = l2cap_encrypt_cfm,
++ .disconn_cfm = l2cap_disconn_cfm,
++ .security_cfm = l2cap_security_cfm,
+ .recv_acldata = l2cap_recv_acldata
+ };
+
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index acd84fd..1d0fb0f 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -46,7 +46,7 @@
+ #include <net/bluetooth/l2cap.h>
+ #include <net/bluetooth/rfcomm.h>
+
+-#define VERSION "1.10"
++#define VERSION "1.11"
+
+ static int disable_cfc = 0;
+ static int channel_mtu = -1;
+@@ -223,19 +223,25 @@ static int rfcomm_l2sock_create(struct socket **sock)
+ return err;
+ }
+
+-static inline int rfcomm_check_link_mode(struct rfcomm_dlc *d)
++static inline int rfcomm_check_security(struct rfcomm_dlc *d)
+ {
+ struct sock *sk = d->session->sock->sk;
++ __u8 auth_type;
+
+- if (d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) {
+- if (!hci_conn_encrypt(l2cap_pi(sk)->conn->hcon))
+- return 1;
+- } else if (d->link_mode & RFCOMM_LM_AUTH) {
+- if (!hci_conn_auth(l2cap_pi(sk)->conn->hcon))
+- return 1;
++ switch (d->sec_level) {
++ case BT_SECURITY_HIGH:
++ auth_type = HCI_AT_GENERAL_BONDING_MITM;
++ break;
++ case BT_SECURITY_MEDIUM:
++ auth_type = HCI_AT_GENERAL_BONDING;
++ break;
++ default:
++ auth_type = HCI_AT_NO_BONDING;
++ break;
+ }
+
+- return 0;
++ return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level,
++ auth_type);
+ }
+
+ /* ---- RFCOMM DLCs ---- */
+@@ -388,10 +394,10 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
+ d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc;
+
+ if (s->state == BT_CONNECTED) {
+- if (rfcomm_check_link_mode(d))
+- set_bit(RFCOMM_AUTH_PENDING, &d->flags);
+- else
++ if (rfcomm_check_security(d))
+ rfcomm_send_pn(s, 1, d);
++ else
++ set_bit(RFCOMM_AUTH_PENDING, &d->flags);
+ }
+
+ rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT);
+@@ -421,9 +427,16 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
+ d, d->state, d->dlci, err, s);
+
+ switch (d->state) {
+- case BT_CONNECTED:
+- case BT_CONFIG:
+ case BT_CONNECT:
++ case BT_CONFIG:
++ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
++ set_bit(RFCOMM_AUTH_REJECT, &d->flags);
++ rfcomm_schedule(RFCOMM_SCHED_AUTH);
++ break;
++ }
++ /* Fall through */
++
++ case BT_CONNECTED:
+ d->state = BT_DISCONN;
+ if (skb_queue_empty(&d->tx_queue)) {
+ rfcomm_send_disc(s, d->dlci);
+@@ -434,6 +447,15 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
+ }
+ break;
+
++ case BT_OPEN:
++ case BT_CONNECT2:
++ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
++ set_bit(RFCOMM_AUTH_REJECT, &d->flags);
++ rfcomm_schedule(RFCOMM_SCHED_AUTH);
++ break;
++ }
++ /* Fall through */
++
+ default:
+ rfcomm_dlc_clear_timer(d);
+
+@@ -636,6 +658,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
+ bacpy(&addr.l2_bdaddr, src);
+ addr.l2_family = AF_BLUETOOTH;
+ addr.l2_psm = 0;
++ addr.l2_cid = 0;
+ *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
+ if (*err < 0)
+ goto failed;
+@@ -657,6 +680,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
+ bacpy(&addr.l2_bdaddr, dst);
+ addr.l2_family = AF_BLUETOOTH;
+ addr.l2_psm = htobs(RFCOMM_PSM);
++ addr.l2_cid = 0;
+ *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
+ if (*err == 0 || *err == -EINPROGRESS)
+ return s;
+@@ -1162,7 +1186,7 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
+ return 0;
+ }
+
+-static void rfcomm_dlc_accept(struct rfcomm_dlc *d)
++void rfcomm_dlc_accept(struct rfcomm_dlc *d)
+ {
+ struct sock *sk = d->session->sock->sk;
+
+@@ -1175,12 +1199,31 @@ static void rfcomm_dlc_accept(struct rfcomm_dlc *d)
+ d->state_change(d, 0);
+ rfcomm_dlc_unlock(d);
+
+- if (d->link_mode & RFCOMM_LM_MASTER)
++ if (d->role_switch)
+ hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00);
+
+ rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
+ }
+
++static void rfcomm_check_accept(struct rfcomm_dlc *d)
++{
++ if (rfcomm_check_security(d)) {
++ if (d->defer_setup) {
++ set_bit(RFCOMM_DEFER_SETUP, &d->flags);
++ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
++
++ rfcomm_dlc_lock(d);
++ d->state = BT_CONNECT2;
++ d->state_change(d, 0);
++ rfcomm_dlc_unlock(d);
++ } else
++ rfcomm_dlc_accept(d);
++ } else {
++ set_bit(RFCOMM_AUTH_PENDING, &d->flags);
++ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
++ }
++}
++
+ static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci)
+ {
+ struct rfcomm_dlc *d;
+@@ -1203,11 +1246,7 @@ static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci)
+ if (d) {
+ if (d->state == BT_OPEN) {
+ /* DLC was previously opened by PN request */
+- if (rfcomm_check_link_mode(d)) {
+- set_bit(RFCOMM_AUTH_PENDING, &d->flags);
+- rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
+- } else
+- rfcomm_dlc_accept(d);
++ rfcomm_check_accept(d);
+ }
+ return 0;
+ }
+@@ -1219,11 +1258,7 @@ static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci)
+ d->addr = __addr(s->initiator, dlci);
+ rfcomm_dlc_link(s, d);
+
+- if (rfcomm_check_link_mode(d)) {
+- set_bit(RFCOMM_AUTH_PENDING, &d->flags);
+- rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
+- } else
+- rfcomm_dlc_accept(d);
++ rfcomm_check_accept(d);
+ } else {
+ rfcomm_send_dm(s, dlci);
+ }
+@@ -1637,11 +1672,12 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
+ d = list_entry(p, struct rfcomm_dlc, list);
+ if (d->state == BT_CONFIG) {
+ d->mtu = s->mtu;
+- if (rfcomm_check_link_mode(d)) {
++ if (rfcomm_check_security(d)) {
++ rfcomm_send_pn(s, 1, d);
++ } else {
+ set_bit(RFCOMM_AUTH_PENDING, &d->flags);
+ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
+- } else
+- rfcomm_send_pn(s, 1, d);
++ }
+ }
+ }
+ }
+@@ -1717,11 +1753,17 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
+ if (d->out) {
+ rfcomm_send_pn(s, 1, d);
+ rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT);
+- } else
+- rfcomm_dlc_accept(d);
+- if (d->link_mode & RFCOMM_LM_SECURE) {
+- struct sock *sk = s->sock->sk;
+- hci_conn_change_link_key(l2cap_pi(sk)->conn->hcon);
++ } else {
++ if (d->defer_setup) {
++ set_bit(RFCOMM_DEFER_SETUP, &d->flags);
++ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
++
++ rfcomm_dlc_lock(d);
++ d->state = BT_CONNECT2;
++ d->state_change(d, 0);
++ rfcomm_dlc_unlock(d);
++ } else
++ rfcomm_dlc_accept(d);
+ }
+ continue;
+ } else if (test_and_clear_bit(RFCOMM_AUTH_REJECT, &d->flags)) {
+@@ -1734,6 +1776,9 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
+ continue;
+ }
+
++ if (test_bit(RFCOMM_SEC_PENDING, &d->flags))
++ continue;
++
+ if (test_bit(RFCOMM_TX_THROTTLED, &s->flags))
+ continue;
+
+@@ -1876,6 +1921,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
+ bacpy(&addr.l2_bdaddr, ba);
+ addr.l2_family = AF_BLUETOOTH;
+ addr.l2_psm = htobs(RFCOMM_PSM);
++ addr.l2_cid = 0;
+ err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
+ if (err < 0) {
+ BT_ERR("Bind failed %d", err);
+@@ -1947,42 +1993,7 @@ static int rfcomm_run(void *unused)
+ return 0;
+ }
+
+-static void rfcomm_auth_cfm(struct hci_conn *conn, u8 status)
+-{
+- struct rfcomm_session *s;
+- struct rfcomm_dlc *d;
+- struct list_head *p, *n;
+-
+- BT_DBG("conn %p status 0x%02x", conn, status);
+-
+- s = rfcomm_session_get(&conn->hdev->bdaddr, &conn->dst);
+- if (!s)
+- return;
+-
+- rfcomm_session_hold(s);
+-
+- list_for_each_safe(p, n, &s->dlcs) {
+- d = list_entry(p, struct rfcomm_dlc, list);
+-
+- if ((d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) &&
+- !(conn->link_mode & HCI_LM_ENCRYPT) && !status)
+- continue;
+-
+- if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
+- continue;
+-
+- if (!status)
+- set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
+- else
+- set_bit(RFCOMM_AUTH_REJECT, &d->flags);
+- }
+-
+- rfcomm_session_put(s);
+-
+- rfcomm_schedule(RFCOMM_SCHED_AUTH);
+-}
+-
+-static void rfcomm_encrypt_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
++static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ {
+ struct rfcomm_session *s;
+ struct rfcomm_dlc *d;
+@@ -1999,18 +2010,29 @@ static void rfcomm_encrypt_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ list_for_each_safe(p, n, &s->dlcs) {
+ d = list_entry(p, struct rfcomm_dlc, list);
+
+- if ((d->link_mode & (RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE)) &&
+- (d->state == BT_CONNECTED ||
+- d->state == BT_CONFIG) &&
+- !status && encrypt == 0x00) {
+- __rfcomm_dlc_close(d, ECONNREFUSED);
+- continue;
++ if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
++ rfcomm_dlc_clear_timer(d);
++ if (status || encrypt == 0x00) {
++ __rfcomm_dlc_close(d, ECONNREFUSED);
++ continue;
++ }
++ }
++
++ if (d->state == BT_CONNECTED && !status && encrypt == 0x00) {
++ if (d->sec_level == BT_SECURITY_MEDIUM) {
++ set_bit(RFCOMM_SEC_PENDING, &d->flags);
++ rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
++ continue;
++ } else if (d->sec_level == BT_SECURITY_HIGH) {
++ __rfcomm_dlc_close(d, ECONNREFUSED);
++ continue;
++ }
+ }
+
+ if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
+ continue;
+
+- if (!status && encrypt)
++ if (!status)
+ set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
+ else
+ set_bit(RFCOMM_AUTH_REJECT, &d->flags);
+@@ -2023,8 +2045,7 @@ static void rfcomm_encrypt_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+
+ static struct hci_cb rfcomm_cb = {
+ .name = "RFCOMM",
+- .auth_cfm = rfcomm_auth_cfm,
+- .encrypt_cfm = rfcomm_encrypt_cfm
++ .security_cfm = rfcomm_security_cfm
+ };
+
+ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index d3fc6fc..7f48278 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -261,12 +261,19 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
+
+ if (parent) {
+ sk->sk_type = parent->sk_type;
+- pi->link_mode = rfcomm_pi(parent)->link_mode;
++ pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
++
++ pi->sec_level = rfcomm_pi(parent)->sec_level;
++ pi->role_switch = rfcomm_pi(parent)->role_switch;
+ } else {
+- pi->link_mode = 0;
++ pi->dlc->defer_setup = 0;
++
++ pi->sec_level = BT_SECURITY_LOW;
++ pi->role_switch = 0;
+ }
+
+- pi->dlc->link_mode = pi->link_mode;
++ pi->dlc->sec_level = pi->sec_level;
++ pi->dlc->role_switch = pi->role_switch;
+ }
+
+ static struct proto rfcomm_proto = {
+@@ -406,7 +413,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
+ bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
+ rfcomm_pi(sk)->channel = sa->rc_channel;
+
+- d->link_mode = rfcomm_pi(sk)->link_mode;
++ d->sec_level = rfcomm_pi(sk)->sec_level;
++ d->role_switch = rfcomm_pi(sk)->role_switch;
+
+ err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
+ if (!err)
+@@ -554,6 +562,9 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct sk_buff *skb;
+ int sent = 0;
+
++ if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
++ return -ENOTCONN;
++
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+@@ -570,8 +581,11 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+
+ skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+- if (!skb)
++ if (!skb) {
++ if (sent == 0)
++ sent = err;
+ break;
++ }
+ skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
+
+ err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+@@ -630,10 +644,16 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+ {
+ struct sock *sk = sock->sk;
++ struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
+ int err = 0;
+ size_t target, copied = 0;
+ long timeo;
+
++ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
++ rfcomm_dlc_accept(d);
++ return 0;
++ }
++
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+@@ -710,7 +730,7 @@ out:
+ return copied ? : err;
+ }
+
+-static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
++static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
+ {
+ struct sock *sk = sock->sk;
+ int err = 0;
+@@ -727,7 +747,14 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+ break;
+ }
+
+- rfcomm_pi(sk)->link_mode = opt;
++ if (opt & RFCOMM_LM_AUTH)
++ rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
++ if (opt & RFCOMM_LM_ENCRYPT)
++ rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
++ if (opt & RFCOMM_LM_SECURE)
++ rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;
++
++ rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
+ break;
+
+ default:
+@@ -739,12 +766,76 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+ return err;
+ }
+
+-static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
++static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
++{
++ struct sock *sk = sock->sk;
++ struct bt_security sec;
++ int len, err = 0;
++ u32 opt;
++
++ BT_DBG("sk %p", sk);
++
++ if (level == SOL_RFCOMM)
++ return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);
++
++ if (level != SOL_BLUETOOTH)
++ return -ENOPROTOOPT;
++
++ lock_sock(sk);
++
++ switch (optname) {
++ case BT_SECURITY:
++ if (sk->sk_type != SOCK_STREAM) {
++ err = -EINVAL;
++ break;
++ }
++
++ sec.level = BT_SECURITY_LOW;
++
++ len = min_t(unsigned int, sizeof(sec), optlen);
++ if (copy_from_user((char *) &sec, optval, len)) {
++ err = -EFAULT;
++ break;
++ }
++
++ if (sec.level > BT_SECURITY_HIGH) {
++ err = -EINVAL;
++ break;
++ }
++
++ rfcomm_pi(sk)->sec_level = sec.level;
++ break;
++
++ case BT_DEFER_SETUP:
++ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
++ err = -EINVAL;
++ break;
++ }
++
++ if (get_user(opt, (u32 __user *) optval)) {
++ err = -EFAULT;
++ break;
++ }
++
++ bt_sk(sk)->defer_setup = opt;
++ break;
++
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++
++static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+ {
+ struct sock *sk = sock->sk;
+ struct sock *l2cap_sk;
+ struct rfcomm_conninfo cinfo;
+ int len, err = 0;
++ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+@@ -755,12 +846,32 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
+
+ switch (optname) {
+ case RFCOMM_LM:
+- if (put_user(rfcomm_pi(sk)->link_mode, (u32 __user *) optval))
++ switch (rfcomm_pi(sk)->sec_level) {
++ case BT_SECURITY_LOW:
++ opt = RFCOMM_LM_AUTH;
++ break;
++ case BT_SECURITY_MEDIUM:
++ opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
++ break;
++ case BT_SECURITY_HIGH:
++ opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
++ RFCOMM_LM_SECURE;
++ break;
++ default:
++ opt = 0;
++ break;
++ }
++
++ if (rfcomm_pi(sk)->role_switch)
++ opt |= RFCOMM_LM_MASTER;
++
++ if (put_user(opt, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ case RFCOMM_CONNINFO:
+- if (sk->sk_state != BT_CONNECTED) {
++ if (sk->sk_state != BT_CONNECTED &&
++ !rfcomm_pi(sk)->dlc->defer_setup) {
+ err = -ENOTCONN;
+ break;
+ }
+@@ -785,6 +896,60 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
+ return err;
+ }
+
++static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
++{
++ struct sock *sk = sock->sk;
++ struct bt_security sec;
++ int len, err = 0;
++
++ BT_DBG("sk %p", sk);
++
++ if (level == SOL_RFCOMM)
++ return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);
++
++ if (level != SOL_BLUETOOTH)
++ return -ENOPROTOOPT;
++
++ if (get_user(len, optlen))
++ return -EFAULT;
++
++ lock_sock(sk);
++
++ switch (optname) {
++ case BT_SECURITY:
++ if (sk->sk_type != SOCK_STREAM) {
++ err = -EINVAL;
++ break;
++ }
++
++ sec.level = rfcomm_pi(sk)->sec_level;
++
++ len = min_t(unsigned int, len, sizeof(sec));
++ if (copy_to_user(optval, (char *) &sec, len))
++ err = -EFAULT;
++
++ break;
++
++ case BT_DEFER_SETUP:
++ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
++ err = -EINVAL;
++ break;
++ }
++
++ if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
++ err = -EFAULT;
++
++ break;
++
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++
+ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ {
+ struct sock *sk __maybe_unused = sock->sk;
+@@ -888,6 +1053,10 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
+
+ done:
+ bh_unlock_sock(parent);
++
++ if (bt_sk(parent)->defer_setup)
++ parent->sk_state_change(parent);
++
+ return result;
+ }
+
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 46fd8bf..51ae0c3 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -195,7 +195,7 @@ static int sco_connect(struct sock *sk)
+ else
+ type = SCO_LINK;
+
+- hcon = hci_connect(hdev, type, dst, HCI_AT_NO_BONDING);
++ hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ if (!hcon)
+ goto done;
+
+@@ -668,7 +668,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char
+ return err;
+ }
+
+-static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
++static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+ {
+ struct sock *sk = sock->sk;
+ struct sco_options opts;
+@@ -723,6 +723,31 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
+ return err;
+ }
+
++static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
++{
++ struct sock *sk = sock->sk;
++ int len, err = 0;
++
++ BT_DBG("sk %p", sk);
++
++ if (level == SOL_SCO)
++ return sco_sock_getsockopt_old(sock, optname, optval, optlen);
++
++ if (get_user(len, optlen))
++ return -EFAULT;
++
++ lock_sock(sk);
++
++ switch (optname) {
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++
+ static int sco_sock_release(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
+@@ -832,10 +857,30 @@ done:
+ /* ----- SCO interface with lower layer (HCI) ----- */
+ static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+ {
++ register struct sock *sk;
++ struct hlist_node *node;
++ int lm = 0;
++
++ if (type != SCO_LINK && type != ESCO_LINK)
++ return 0;
++
+ BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
+
+- /* Always accept connection */
+- return HCI_LM_ACCEPT;
++ /* Find listening sockets */
++ read_lock(&sco_sk_list.lock);
++ sk_for_each(sk, node, &sco_sk_list.head) {
++ if (sk->sk_state != BT_LISTEN)
++ continue;
++
++ if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
++ !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
++ lm |= HCI_LM_ACCEPT;
++ break;
++ }
++ }
++ read_unlock(&sco_sk_list.lock);
++
++ return lm;
+ }
+
+ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+@@ -857,7 +902,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+ return 0;
+ }
+
+-static int sco_disconn_ind(struct hci_conn *hcon, __u8 reason)
++static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ {
+ BT_DBG("hcon %p reason %d", hcon, reason);
+
+@@ -940,7 +985,7 @@ static struct hci_proto sco_hci_proto = {
+ .id = HCI_PROTO_SCO,
+ .connect_ind = sco_connect_ind,
+ .connect_cfm = sco_connect_cfm,
+- .disconn_ind = sco_disconn_ind,
++ .disconn_cfm = sco_disconn_cfm,
+ .recv_scodata = sco_recv_scodata
+ };
+
diff --git a/freed-ora/current/F-12/git-cpufreq.patch b/freed-ora/current/F-12/git-cpufreq.patch
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/freed-ora/current/F-12/git-cpufreq.patch
diff --git a/freed-ora/current/F-12/git-linus.diff b/freed-ora/current/F-12/git-linus.diff
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/freed-ora/current/F-12/git-linus.diff
diff --git a/freed-ora/current/F-12/hda_intel-prealloc-4mb-dmabuffer.patch b/freed-ora/current/F-12/hda_intel-prealloc-4mb-dmabuffer.patch
new file mode 100644
index 000000000..c80f11d9d
--- /dev/null
+++ b/freed-ora/current/F-12/hda_intel-prealloc-4mb-dmabuffer.patch
@@ -0,0 +1,35 @@
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c8d9178..7d3bb15 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1774,6 +1774,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
+ struct azx_pcm *apcm;
+ int pcm_dev = cpcm->device;
+ int s, err;
++ size_t prealloc_min = 64*1024; /* 64KB */
+
+ if (pcm_dev >= AZX_MAX_PCMS) {
+ snd_printk(KERN_ERR SFX "Invalid PCM device number %d\n",
+@@ -1807,10 +1808,21 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
+ if (cpcm->stream[s].substreams)
+ snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
+ }
++
+ /* buffer pre-allocation */
++
++ /* subtle, don't allocate a big buffer for modems...
++ * also, don't just test 32BIT_MASK, since azx supports
++ * 64-bit DMA in some cases.
++ */
++ /* lennart wants a 2.2MB buffer for 2sec of 48khz */
++ if (pcm->dev_class == SNDRV_PCM_CLASS_GENERIC &&
++ chip->pci->dma_mask >= DMA_32BIT_MASK)
++ prealloc_min = 4 * 1024 * 1024; /* 4MB */
++
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ snd_dma_pci_data(chip->pci),
+- 1024 * 64, 32 * 1024 * 1024);
++ prealloc_min, 32 * 1024 * 1024);
+ return 0;
+ }
+
diff --git a/freed-ora/current/F-12/hdpvr-ir-enable.patch b/freed-ora/current/F-12/hdpvr-ir-enable.patch
new file mode 100644
index 000000000..a5c7e922c
--- /dev/null
+++ b/freed-ora/current/F-12/hdpvr-ir-enable.patch
@@ -0,0 +1,213 @@
+diff -Naurp a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
+--- a/drivers/media/video/hdpvr/hdpvr-core.c 2010-07-06 17:36:44.000000000 -0400
++++ b/drivers/media/video/hdpvr/hdpvr-core.c 2010-07-06 17:38:13.000000000 -0400
+@@ -363,9 +363,8 @@ static int hdpvr_probe(struct usb_interf
+ goto error;
+ }
+
+-#ifdef CONFIG_I2C
+- /* until i2c is working properly */
+- retval = 0; /* hdpvr_register_i2c_adapter(dev); */
++#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
++ retval = hdpvr_register_i2c_adapter(dev);
+ if (retval < 0) {
+ v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
+ goto error;
+@@ -411,12 +410,9 @@ static void hdpvr_disconnect(struct usb_
+ mutex_unlock(&dev->io_mutex);
+
+ /* deregister I2C adapter */
+-#ifdef CONFIG_I2C
++#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ mutex_lock(&dev->i2c_mutex);
+- if (dev->i2c_adapter)
+- i2c_del_adapter(dev->i2c_adapter);
+- kfree(dev->i2c_adapter);
+- dev->i2c_adapter = NULL;
++ i2c_del_adapter(&dev->i2c_adapter);
+ mutex_unlock(&dev->i2c_mutex);
+ #endif /* CONFIG_I2C */
+
+diff -Naurp a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
+--- a/drivers/media/video/hdpvr/hdpvr.h 2010-02-24 13:52:17.000000000 -0500
++++ b/drivers/media/video/hdpvr/hdpvr.h 2010-07-06 17:42:20.000000000 -0400
+@@ -101,7 +101,7 @@ struct hdpvr_device {
+ struct work_struct worker;
+
+ /* I2C adapter */
+- struct i2c_adapter *i2c_adapter;
++ struct i2c_adapter i2c_adapter;
+ /* I2C lock */
+ struct mutex i2c_mutex;
+
+diff -Naurp a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
+--- a/drivers/media/video/hdpvr/hdpvr-i2c.c 2010-07-06 17:36:51.000000000 -0400
++++ b/drivers/media/video/hdpvr/hdpvr-i2c.c 2010-07-06 17:45:50.000000000 -0400
+@@ -10,6 +10,8 @@
+ *
+ */
+
++#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
++
+ #include <linux/i2c.h>
+
+ #include "hdpvr.h"
+@@ -19,10 +21,13 @@
+
+ #define REQTYPE_I2C_READ 0xb1
+ #define REQTYPE_I2C_WRITE 0xb0
+-#define REQTYPE_I2C_WRITE_STATT 0xd0
++#define REQTYPE_I2C_WRITE_STAT 0xd0
++
++#define HDPVR_HW_Z8F0811_IR_TX_I2C_ADDR 0x70
++#define HDPVR_HW_Z8F0811_IR_RX_I2C_ADDR 0x71
+
+ static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr,
+- char *data, int len)
++ char *data, int len, int bus)
+ {
+ int ret;
+ char *buf = kmalloc(len, GFP_KERNEL);
+@@ -32,7 +37,7 @@ static int hdpvr_i2c_read(struct hdpvr_d
+ ret = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ REQTYPE_I2C_READ, CTRL_READ_REQUEST,
+- 0x100|addr, 0, buf, len, 1000);
++ bus<<8 | addr, 0, buf, len, 1000);
+
+ if (ret == len) {
+ memcpy(data, buf, len);
+@@ -46,7 +51,7 @@ static int hdpvr_i2c_read(struct hdpvr_d
+ }
+
+ static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr,
+- char *data, int len)
++ char *data, int len, int bus)
+ {
+ int ret;
+ char *buf = kmalloc(len, GFP_KERNEL);
+@@ -57,17 +62,17 @@ static int hdpvr_i2c_write(struct hdpvr_
+ ret = usb_control_msg(dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
+- 0x100|addr, 0, buf, len, 1000);
++ bus<<8 | addr, 0, buf, len, 1000);
+
+ if (ret < 0)
+ goto error;
+
+ ret = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+- REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
++ REQTYPE_I2C_WRITE_STAT, CTRL_READ_REQUEST,
+ 0, 0, buf, 2, 1000);
+
+- if (ret == 2)
++ if (ret == 2 && buf[1] == (len - 1))
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+@@ -93,10 +98,10 @@ static int hdpvr_transfer(struct i2c_ada
+
+ if (msgs[i].flags & I2C_M_RD)
+ retval = hdpvr_i2c_read(dev, addr, msgs[i].buf,
+- msgs[i].len);
++ msgs[i].len, 1);
+ else
+ retval = hdpvr_i2c_write(dev, addr, msgs[i].buf,
+- msgs[i].len);
++ msgs[i].len, 1);
+ }
+
+ mutex_unlock(&dev->i2c_mutex);
+@@ -114,31 +119,61 @@ static struct i2c_algorithm hdpvr_algo =
+ .functionality = hdpvr_functionality,
+ };
+
++static struct i2c_adapter hdpvr_i2c_adap_template = {
++ .name = "Hauppauge HD PVR I2C",
++ .owner = THIS_MODULE,
++ .id = I2C_HW_B_HDPVR,
++ .algo = &hdpvr_algo,
++ .algo_data = NULL,
++ .class = I2C_CLASS_TV_ANALOG,
++};
++
++static struct i2c_board_info hdpvr_i2c_board_info = {
++ I2C_BOARD_INFO("ir_tx_z8f0811_haup", HDPVR_HW_Z8F0811_IR_TX_I2C_ADDR),
++ I2C_BOARD_INFO("ir_rx_z8f0811_haup", HDPVR_HW_Z8F0811_IR_RX_I2C_ADDR),
++};
++
++static int hdpvr_activate_ir(struct hdpvr_device *dev)
++{
++ char buffer[8];
++
++ mutex_lock(&dev->i2c_mutex);
++
++ hdpvr_i2c_read(dev, 0x54, buffer, 1, 0);
++
++ buffer[0] = 0;
++ buffer[1] = 0x8;
++ hdpvr_i2c_write(dev, 0x54, buffer, 2, 1);
++
++ buffer[1] = 0x18;
++ hdpvr_i2c_write(dev, 0x54, buffer, 2, 1);
++
++ mutex_unlock(&dev->i2c_mutex);
++ return 0;
++}
++
++
+ int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
+ {
+- struct i2c_adapter *i2c_adap;
+ int retval = -ENOMEM;
+
+- i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
+- if (i2c_adap == NULL)
++ hdpvr_activate_ir(dev);
++
++ memcpy(&dev->i2c_adapter, &hdpvr_i2c_adap_template,
++ sizeof(struct i2c_adapter));
++ dev->i2c_adapter.dev.parent = &dev->udev->dev;
++
++ i2c_set_adapdata(&dev->i2c_adapter, dev);
++
++ retval = i2c_add_adapter(&dev->i2c_adapter);
++
++ if (retval)
+ goto error;
+
+- strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
+- sizeof(i2c_adap->name));
+- i2c_adap->algo = &hdpvr_algo;
+- i2c_adap->class = I2C_CLASS_TV_ANALOG;
+- i2c_adap->owner = THIS_MODULE;
+- i2c_adap->dev.parent = &dev->udev->dev;
+-
+- i2c_set_adapdata(i2c_adap, dev);
+-
+- retval = i2c_add_adapter(i2c_adap);
+-
+- if (!retval)
+- dev->i2c_adapter = i2c_adap;
+- else
+- kfree(i2c_adap);
++ i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info);
+
+ error:
+ return retval;
+ }
++
++#endif /* CONFIG_I2C */
+diff -Naurp a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile
+--- a/drivers/media/video/hdpvr/Makefile 2010-07-06 17:36:38.000000000 -0400
++++ b/drivers/media/video/hdpvr/Makefile 2010-07-06 17:35:17.000000000 -0400
+@@ -1,6 +1,4 @@
+-hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o
+-
+-hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o
++hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-i2c.o hdpvr-video.o
+
+ obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o
+
diff --git a/freed-ora/current/F-12/hid-01-usbhid-initialize-interface-pointers-early-enough.patch b/freed-ora/current/F-12/hid-01-usbhid-initialize-interface-pointers-early-enough.patch
new file mode 100644
index 000000000..d522b3f69
--- /dev/null
+++ b/freed-ora/current/F-12/hid-01-usbhid-initialize-interface-pointers-early-enough.patch
@@ -0,0 +1,40 @@
+commit 57ab12e418ec4fe24c11788bb1bbdabb29d05679
+Author: Jiri Kosina <jkosina at suse.cz>
+Date: Wed Feb 17 14:25:01 2010 +0100
+
+ HID: usbhid: initialize interface pointers early enough
+
+ Move the initialization of USB interface pointers from _start()
+ over to _probe() callback, which is where it belongs.
+
+ This fixes case where interface is NULL when parsing of report
+ descriptor fails.
+
+ LKML-Reference: <20100213135720.603e5f64 at neptune.home>
+ Reported-by: Alan Stern <stern at rowland.harvard.edu>
+ Tested-by: Bruno Prémont <bonbons at linux-vserver.org>
+ Signed-off-by: Jiri Kosina <jkosina at suse.cz>
+
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 74bd3ca..ceaf4a1 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -1005,9 +1005,6 @@ static int usbhid_start(struct hid_device *hid)
+
+ spin_lock_init(&usbhid->lock);
+
+- usbhid->intf = intf;
+- usbhid->ifnum = interface->desc.bInterfaceNumber;
+-
+ usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
+ if (!usbhid->urbctrl) {
+ ret = -ENOMEM;
+@@ -1178,6 +1175,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
+
+ hid->driver_data = usbhid;
+ usbhid->hid = hid;
++ usbhid->intf = intf;
++ usbhid->ifnum = interface->desc.bInterfaceNumber;
+
+ ret = hid_add_device(hid);
+ if (ret) {
diff --git a/freed-ora/current/F-12/hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch b/freed-ora/current/F-12/hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch
new file mode 100644
index 000000000..bbd388030
--- /dev/null
+++ b/freed-ora/current/F-12/hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch
@@ -0,0 +1,53 @@
+commit fde4e2f73208b8f34f123791e39c0cb6bc74b32a
+Author: Alan Stern <stern at rowland.harvard.edu>
+Date: Fri May 7 10:41:10 2010 -0400
+
+ HID: fix suspend crash by moving initializations earlier
+
+ Although the usbhid driver allocates its usbhid structure in the probe
+ routine, several critical fields in that structure don't get
+ initialized until usbhid_start(). However if report descriptor
+ parsing fails then usbhid_start() is never called. This leads to
+ problems during system suspend -- the system will freeze.
+
+ This patch (as1378) fixes the bug by moving the initialization
+ statements up into usbhid_probe().
+
+ Signed-off-by: Alan Stern <stern at rowland.harvard.edu>
+ Reported-by: Bruno Prémont <bonbons at linux-vserver.org>
+ Tested-By: Bruno Prémont <bonbons at linux-vserver.org>
+ Signed-off-by: Jiri Kosina <jkosina at suse.cz>
+
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 56d06cd..7b85b69 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -999,13 +999,6 @@ static int usbhid_start(struct hid_device *hid)
+ }
+ }
+
+- init_waitqueue_head(&usbhid->wait);
+- INIT_WORK(&usbhid->reset_work, hid_reset);
+- INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
+- setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+-
+- spin_lock_init(&usbhid->lock);
+-
+ usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
+ if (!usbhid->urbctrl) {
+ ret = -ENOMEM;
+@@ -1179,6 +1172,12 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
+ usbhid->intf = intf;
+ usbhid->ifnum = interface->desc.bInterfaceNumber;
+
++ init_waitqueue_head(&usbhid->wait);
++ INIT_WORK(&usbhid->reset_work, hid_reset);
++ INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
++ setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
++ spin_lock_init(&usbhid->lock);
++
+ ret = hid_add_device(hid);
+ if (ret) {
+ if (ret != -ENODEV)
+
+
diff --git a/freed-ora/current/F-12/ice1712-fix-revo71-mixer-names.patch b/freed-ora/current/F-12/ice1712-fix-revo71-mixer-names.patch
new file mode 100644
index 000000000..329651076
--- /dev/null
+++ b/freed-ora/current/F-12/ice1712-fix-revo71-mixer-names.patch
@@ -0,0 +1,42 @@
+diff --git a/sound/pci/ice1712/revo.c b/sound/pci/ice1712/revo.c
+index b508bb3..dc13e2d 100644
+--- a/sound/pci/ice1712/revo.c
++++ b/sound/pci/ice1712/revo.c
+@@ -197,26 +197,26 @@ static int revo51_i2c_init(struct snd_ice1712 *ice,
+
+ static const struct snd_akm4xxx_dac_channel revo71_front[] = {
+ {
+- .name = "PCM Playback Volume",
++ .name = "Front Playback Volume",
+ .num_channels = 2,
+ /* front channels DAC supports muting */
+- .switch_name = "PCM Playback Switch",
++ .switch_name = "Front Playback Switch",
+ },
+ };
+
+ static const struct snd_akm4xxx_dac_channel revo71_surround[] = {
+- AK_DAC("PCM Center Playback Volume", 1),
+- AK_DAC("PCM LFE Playback Volume", 1),
+- AK_DAC("PCM Side Playback Volume", 2),
+- AK_DAC("PCM Rear Playback Volume", 2),
++ AK_DAC("Center Playback Volume", 1),
++ AK_DAC("LFE Playback Volume", 1),
++ AK_DAC("Side Playback Volume", 2),
++ AK_DAC("Rear Playback Volume", 2),
+ };
+
+ static const struct snd_akm4xxx_dac_channel revo51_dac[] = {
+- AK_DAC("PCM Playback Volume", 2),
+- AK_DAC("PCM Center Playback Volume", 1),
+- AK_DAC("PCM LFE Playback Volume", 1),
+- AK_DAC("PCM Rear Playback Volume", 2),
+- AK_DAC("PCM Headphone Volume", 2),
++ AK_DAC("Front Playback Volume", 2),
++ AK_DAC("Center Playback Volume", 1),
++ AK_DAC("LFE Playback Volume", 1),
++ AK_DAC("Rear Playback Volume", 2),
++ AK_DAC("Headphone Volume", 2),
+ };
+
+ static const char *revo51_adc_input_names[] = {
diff --git a/freed-ora/current/F-12/inotify-fix-inotify-oneshot-support.patch b/freed-ora/current/F-12/inotify-fix-inotify-oneshot-support.patch
new file mode 100644
index 000000000..ba63e1090
--- /dev/null
+++ b/freed-ora/current/F-12/inotify-fix-inotify-oneshot-support.patch
@@ -0,0 +1,25 @@
+#607327
+
+During the large inotify rewrite to fsnotify I completely dropped support
+for IN_ONESHOT. Reimplement that support.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+---
+
+ fs/notify/inotify/inotify_fsnotify.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
+index daa666a..388a150 100644
+--- a/fs/notify/inotify/inotify_fsnotify.c
++++ b/fs/notify/inotify/inotify_fsnotify.c
+@@ -126,6 +126,9 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
+ ret = 0;
+ }
+
++ if (entry->mask & IN_ONESHOT)
++ fsnotify_destroy_mark_by_entry(entry);
++
+ /*
+ * If we hold the entry until after the event is on the queue
+ * IN_IGNORED won't be able to pass this event in the queue
diff --git a/freed-ora/current/F-12/inotify-send-IN_UNMOUNT-events.patch b/freed-ora/current/F-12/inotify-send-IN_UNMOUNT-events.patch
new file mode 100644
index 000000000..cf1d4c4bf
--- /dev/null
+++ b/freed-ora/current/F-12/inotify-send-IN_UNMOUNT-events.patch
@@ -0,0 +1,29 @@
+#607327 ?
+
+Since the fsnotify rewrite around 2.6.31, inotify has not sent events
+about inodes which are unmounted. This patch restores those events.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+---
+
+ fs/notify/inotify/inotify_user.c | 7 +++++--
+ 1 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 44aeb0f..f381daf 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -90,8 +90,11 @@ static inline __u32 inotify_arg_to_mask(u32 arg)
+ {
+ __u32 mask;
+
+- /* everything should accept their own ignored and cares about children */
+- mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
++ /*
++ * everything should accept their own ignored, cares about children,
++ * and should receive events when the inode is unmounted
++ */
++ mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
+
+ /* mask off the flags used to open the fd */
+ mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
diff --git a/freed-ora/current/F-12/iwlwifi-fix-internal-scan-race.patch b/freed-ora/current/F-12/iwlwifi-fix-internal-scan-race.patch
new file mode 100644
index 000000000..18b315601
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi-fix-internal-scan-race.patch
@@ -0,0 +1,123 @@
+From reinette.chatre@intel.com Thu May 13 17:49:59 2010
+Return-path: <reinette.chatre@intel.com>
+Envelope-to: linville@tuxdriver.com
+Delivery-date: Thu, 13 May 2010 17:49:59 -0400
+Received: from mga09.intel.com ([134.134.136.24])
+ by smtp.tuxdriver.com with esmtp (Exim 4.63)
+ (envelope-from <reinette.chatre@intel.com>)
+ id 1OCgI1-0007H3-Eg
+ for linville@tuxdriver.com; Thu, 13 May 2010 17:49:59 -0400
+Received: from orsmga002.jf.intel.com ([10.7.209.21])
+ by orsmga102.jf.intel.com with ESMTP; 13 May 2010 14:48:04 -0700
+X-ExtLoop1: 1
+X-IronPort-AV: E=Sophos;i="4.53,224,1272870000";
+ d="scan'208";a="517743256"
+Received: from rchatre-desk.amr.corp.intel.com.jf.intel.com (HELO localhost.localdomain) ([134.134.15.94])
+ by orsmga002.jf.intel.com with ESMTP; 13 May 2010 14:49:12 -0700
+From: Reinette Chatre <reinette.chatre@intel.com>
+To: linville@tuxdriver.com
+Cc: linux-wireless@vger.kernel.org, ipw3945-devel@lists.sourceforge.net, Reinette Chatre <reinette.chatre@intel.com>
+Subject: [PATCH 1/2] iwlwifi: fix internal scan race
+Date: Thu, 13 May 2010 14:49:44 -0700
+Message-Id: <1273787385-9248-2-git-send-email-reinette.chatre@intel.com>
+X-Mailer: git-send-email 1.6.3.3
+In-Reply-To: <1273787385-9248-1-git-send-email-reinette.chatre@intel.com>
+References: <1273787385-9248-1-git-send-email-reinette.chatre@intel.com>
+X-Spam-Score: -4.2 (----)
+X-Spam-Status: No
+Status: RO
+Content-Length: 3370
+Lines: 91
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+It is possible for the internal scan to race against itself if the device
+does not return scan results for the first request. What happens in this
+case is that the cleanup done during the abort of the first internal scan
+also cleans up part of the new scan, causing it to access memory it shouldn't.
+
+Here are details:
+* First internal scan is triggered and scan command sent to device.
+* After seven seconds there is no scan results so the watchdog timer
+ triggers a scan abort.
+* The scan abort succeeds and a SCAN_COMPLETE_NOTIFICATION is received for
+ failed scan.
+* During processing of SCAN_COMPLETE_NOTIFICATION we clear STATUS_SCANNING
+ and queue the "scan_completed" work.
+** At this time, since the problem that caused the internal scan in the
+ first place is still present, a new internal scan is triggered.
+The behavior at this point is a bit different between 2.6.34 and 2.6.35
+since 2.6.35 has a lot of this synchronized. The rest of the race
+description will thus be generalized.
+** As part of preparing for the scan "is_internal_short_scan" is set to
+true.
+* At this point the completion work for the first scan is run. As part of
+ this, locking is missing around the "is_internal_short_scan" variable
+ and it is set to "false".
+** Now the second scan runs and considers itself a real (not internal)
+ scan and thus causes problems with the wrong memory being accessed.
+
+The fix is twofold.
+* Since "is_internal_short_scan" should be protected by mutex, fix this in
+ scan completion work so that changes to it can be serialized.
+* Do not queue a new internal scan if one is in progress.
+
+This fixes https://bugzilla.kernel.org/show_bug.cgi?id=15824
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+---
+ drivers/net/wireless/iwlwifi/iwl-scan.c | 21 ++++++++++++++++++---
+ 1 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
+index 2367286..a2c4855 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
+@@ -560,6 +560,11 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
+
+ mutex_lock(&priv->mutex);
+
++ if (priv->is_internal_short_scan == true) {
++ IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
++ goto unlock;
++ }
++
+ if (!iwl_is_ready_rf(priv)) {
+ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
+ goto unlock;
+@@ -957,17 +962,27 @@ void iwl_bg_scan_completed(struct work_struct *work)
+ {
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, scan_completed);
++ bool internal = false;
+
+ IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
+
+ cancel_delayed_work(&priv->scan_check);
+
+- if (!priv->is_internal_short_scan)
+- ieee80211_scan_completed(priv->hw, false);
+- else {
++ mutex_lock(&priv->mutex);
++ if (priv->is_internal_short_scan) {
+ priv->is_internal_short_scan = false;
+ IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
++ internal = true;
+ }
++ mutex_unlock(&priv->mutex);
++
++ /*
++ * Do not hold mutex here since this will cause mac80211 to call
++ * into driver again into functions that will attempt to take
++ * mutex.
++ */
++ if (!internal)
++ ieee80211_scan_completed(priv->hw, false);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+--
+1.6.3.3
+
+
+
diff --git a/freed-ora/current/F-12/iwlwifi-fix-scan-races.patch b/freed-ora/current/F-12/iwlwifi-fix-scan-races.patch
new file mode 100644
index 000000000..2e00f00ad
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi-fix-scan-races.patch
@@ -0,0 +1,139 @@
+commit 88be026490ed89c2ffead81a52531fbac5507e01
+Author: Johannes Berg <johannes.berg@intel.com>
+Date: Wed Apr 7 00:21:36 2010 -0700
+
+ iwlwifi: fix scan races
+
+ When an internal scan is started, nothing protects the
+ is_internal_short_scan variable which can cause crashes,
+ cf. https://bugzilla.kernel.org/show_bug.cgi?id=15667.
+ Fix this by making the short scan request use the mutex
+ for locking, which requires making the request go to a
+ work struct so that it can sleep.
+
+ Reported-by: Peter Zijlstra <peterz@infradead.org>
+ Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index e4c2e1e..ba0fdba 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -3330,6 +3330,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
+
+ cancel_delayed_work_sync(&priv->init_alive_start);
+ cancel_delayed_work(&priv->scan_check);
++ cancel_work_sync(&priv->start_internal_scan);
+ cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->beacon_update);
+ del_timer_sync(&priv->statistics_periodic);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index 894bcb8..1459cdb 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -3357,7 +3357,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
+ */
+ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+ iwl_internal_short_hw_scan(priv);
+- return;
+ }
+
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
+index 732590f..36940a9 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.h
++++ b/drivers/net/wireless/iwlwifi/iwl-core.h
+@@ -506,7 +506,7 @@ void iwl_init_scan_params(struct iwl_priv *priv);
+ int iwl_scan_cancel(struct iwl_priv *priv);
+ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+ int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+-int iwl_internal_short_hw_scan(struct iwl_priv *priv);
++void iwl_internal_short_hw_scan(struct iwl_priv *priv);
+ int iwl_force_reset(struct iwl_priv *priv, int mode);
+ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+ const u8 *ie, int ie_len, int left);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
+index 6054c5f..ef1720a 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
+@@ -1296,6 +1296,7 @@ struct iwl_priv {
+ struct work_struct tt_work;
+ struct work_struct ct_enter;
+ struct work_struct ct_exit;
++ struct work_struct start_internal_scan;
+
+ struct tasklet_struct irq_tasklet;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
+index bd2f7c4..5062f4e 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
+@@ -469,6 +469,8 @@ EXPORT_SYMBOL(iwl_init_scan_params);
+
+ static int iwl_scan_initiate(struct iwl_priv *priv)
+ {
++ WARN_ON(!mutex_is_locked(&priv->mutex));
++
+ IWL_DEBUG_INFO(priv, "Starting scan...\n");
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = false;
+@@ -546,24 +548,31 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
+ * internal short scan, this function should only been called while associated.
+ * It will reset and tune the radio to prevent possible RF related problem
+ */
+-int iwl_internal_short_hw_scan(struct iwl_priv *priv)
++void iwl_internal_short_hw_scan(struct iwl_priv *priv)
+ {
+- int ret = 0;
++ queue_work(priv->workqueue, &priv->start_internal_scan);
++}
++
++static void iwl_bg_start_internal_scan(struct work_struct *work)
++{
++ struct iwl_priv *priv =
++ container_of(work, struct iwl_priv, start_internal_scan);
++
++ mutex_lock(&priv->mutex);
+
+ if (!iwl_is_ready_rf(priv)) {
+- ret = -EIO;
+ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
+- goto out;
++ goto unlock;
+ }
++
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+- ret = -EAGAIN;
+- goto out;
++ goto unlock;
+ }
++
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+- ret = -EAGAIN;
+- goto out;
++ goto unlock;
+ }
+
+ priv->scan_bands = 0;
+@@ -576,9 +585,8 @@ int iwl_internal_short_hw_scan(struct iwl_priv *priv)
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = true;
+ queue_work(priv->workqueue, &priv->request_scan);
+-
+-out:
+- return ret;
++ unlock:
++ mutex_unlock(&priv->mutex);
+ }
+ EXPORT_SYMBOL(iwl_internal_short_hw_scan);
+
+@@ -964,6 +972,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
+ INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
+ INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
+ INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
++ INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
+ INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
+ }
+ EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
diff --git a/freed-ora/current/F-12/iwlwifi-manage-QoS-by-mac-stack.patch b/freed-ora/current/F-12/iwlwifi-manage-QoS-by-mac-stack.patch
new file mode 100644
index 000000000..9ac9971c2
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi-manage-QoS-by-mac-stack.patch
@@ -0,0 +1,100 @@
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+To: kernel@lists.fedoraproject.org, "John W. Linville" <linville@redhat.com>
+Subject: [PATCH 1/4 2.6.32.y] mac80211: explicitly disable/enable QoS
+Date: Fri, 11 Jun 2010 17:03:13 +0200
+
+Add an interface to disable/enable QoS (aka WMM or WME). Currently drivers
+enable it explicitly when the ->conf_tx method is called, and never disable
+it. Disabling is needed for some APs which do not support QoS; we should
+not send QoS frames to them.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+---
+ include/net/mac80211.h | 5 +++++
+ net/mac80211/mlme.c | 9 ++++++++-
+ net/mac80211/util.c | 5 +++++
+ 3 files changed, 18 insertions(+), 1 deletions(-)
+
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index c39ed07..de904fc 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -572,11 +572,15 @@ struct ieee80211_rx_status {
+ * may turn the device off as much as possible. Typically, this flag will
+ * be set when an interface is set UP but not associated or scanning, but
+ * it can also be unset in that case when monitor interfaces are active.
++ * @IEEE80211_CONF_QOS: Enable 802.11e QoS also known as WMM (Wireless
++ * Multimedia). On some drivers (iwlwifi is one of them) we have
++ * to enable/disable QoS explicitly.
+ */
+ enum ieee80211_conf_flags {
+ IEEE80211_CONF_RADIOTAP = (1<<0),
+ IEEE80211_CONF_PS = (1<<1),
+ IEEE80211_CONF_IDLE = (1<<2),
++ IEEE80211_CONF_QOS = (1<<3),
+ };
+
+
+@@ -599,6 +603,7 @@ enum ieee80211_conf_changed {
+ IEEE80211_CONF_CHANGE_CHANNEL = BIT(6),
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7),
+ IEEE80211_CONF_CHANGE_IDLE = BIT(8),
++ IEEE80211_CONF_CHANGE_QOS = BIT(9),
+ };
+
+ /**
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 4a15df1..d3950b7 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -786,6 +786,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
+ int count;
+ u8 *pos;
+
++ if (!local->ops->conf_tx)
++ return;
++
+ if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED))
+ return;
+
+@@ -844,11 +847,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
+ wiphy_name(local->hw.wiphy), queue, aci, acm,
+ params.aifs, params.cw_min, params.cw_max, params.txop);
+ #endif
+- if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
++ if (drv_conf_tx(local, queue, &params))
+ printk(KERN_DEBUG "%s: failed to set TX queue "
+ "parameters for queue %d\n",
+ wiphy_name(local->hw.wiphy), queue);
+ }
++
++ /* enable WMM or activate new settings */
++ local->hw.conf.flags |= IEEE80211_CONF_QOS;
++ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
+ }
+
+ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 31b1085..21f11cc 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -791,6 +791,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
+
+ drv_conf_tx(local, queue, &qparam);
+ }
++
++ /* after reinitializing the QoS TX queue settings to defaults,
++ * disable QoS entirely */
++ local->hw.conf.flags &= ~IEEE80211_CONF_QOS;
++ drv_config(local, IEEE80211_CONF_CHANGE_QOS);
+ }
+
+ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
+--
+1.6.2.5
+
+_______________________________________________
+kernel mailing list
+kernel@lists.fedoraproject.org
+https://admin.fedoraproject.org/mailman/listinfo/kernel
+
diff --git a/freed-ora/current/F-12/iwlwifi-recover_from_tx_stall.patch b/freed-ora/current/F-12/iwlwifi-recover_from_tx_stall.patch
new file mode 100644
index 000000000..0b69e44f5
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi-recover_from_tx_stall.patch
@@ -0,0 +1,13 @@
+https://bugzilla.redhat.com/show_bug.cgi?id=589777#c5
+
+diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c
+--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig 2010-05-19 16:07:15.000000000 -0400
++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c 2010-05-19 16:09:42.000000000 -0400
+@@ -2794,6 +2794,7 @@ static struct iwl_lib_ops iwl3945_lib =
+ .post_associate = iwl3945_post_associate,
+ .isr = iwl_isr_legacy,
+ .config_ap = iwl3945_config_ap,
++ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ };
+
+ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
diff --git a/freed-ora/current/F-12/iwlwifi-reset-card-during-probe.patch b/freed-ora/current/F-12/iwlwifi-reset-card-during-probe.patch
new file mode 100644
index 000000000..74345dceb
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi-reset-card-during-probe.patch
@@ -0,0 +1,167 @@
+From linville@redhat.com Mon Mar 29 14:49:37 2010
+Return-path: <linville@redhat.com>
+Envelope-to: linville@tuxdriver.com
+Delivery-date: Mon, 29 Mar 2010 14:49:37 -0400
+Received: from mx1.redhat.com ([209.132.183.28])
+ by smtp.tuxdriver.com with esmtp (Exim 4.63)
+ (envelope-from <linville@redhat.com>)
+ id 1NwK1n-0004Zz-SW
+ for linville@tuxdriver.com; Mon, 29 Mar 2010 14:49:37 -0400
+Received: from int-mx04.intmail.prod.int.phx2.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.17])
+ by mx1.redhat.com (8.13.8/8.13.8) with ESMTP id o2TInYO7028996
+ (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK)
+ for <linville@tuxdriver.com>; Mon, 29 Mar 2010 14:49:35 -0400
+Received: from savage.usersys.redhat.com (savage.devel.redhat.com [10.11.231.4])
+ by int-mx04.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id o2TInX27023483
+ (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=NO)
+ for <linville@tuxdriver.com>; Mon, 29 Mar 2010 14:49:33 -0400
+Received: from savage.usersys.redhat.com (localhost.localdomain [127.0.0.1])
+ by savage.usersys.redhat.com (8.13.1/8.13.1) with ESMTP id o2TInXPn000652
+ for <linville@tuxdriver.com>; Mon, 29 Mar 2010 14:49:33 -0400
+Received: (from linville@localhost)
+ by savage.usersys.redhat.com (8.13.1/8.13.1/Submit) id o2TInWt7000651
+ for linville@tuxdriver.com; Mon, 29 Mar 2010 14:49:32 -0400
+Resent-Message-Id: <201003291849.o2TInWt7000651@savage.usersys.redhat.com>
+Received: from zmta03.collab.prod.int.phx2.redhat.com (LHLO
+ zmta03.collab.prod.int.phx2.redhat.com) (10.5.5.33) by
+ mail03.corp.redhat.com with LMTP; Fri, 26 Mar 2010 06:05:51 -0400 (EDT)
+Received: from localhost (localhost.localdomain [127.0.0.1])
+ by zmta03.collab.prod.int.phx2.redhat.com (Postfix) with ESMTP id 038004CBE9;
+ Fri, 26 Mar 2010 06:05:51 -0400 (EDT)
+Received: from zmta03.collab.prod.int.phx2.redhat.com ([127.0.0.1])
+ by localhost (zmta03.collab.prod.int.phx2.redhat.com [127.0.0.1]) (amavisd-new, port 10024)
+ with ESMTP id IVjBQyibLBw2; Fri, 26 Mar 2010 06:05:50 -0400 (EDT)
+Received: from int-mx04.intmail.prod.int.phx2.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com [10.5.11.17])
+ by zmta03.collab.prod.int.phx2.redhat.com (Postfix) with ESMTP id BF0144CBE7;
+ Fri, 26 Mar 2010 06:05:50 -0400 (EDT)
+Received: from mx1.redhat.com (ext-mx08.extmail.prod.ext.phx2.redhat.com [10.5.110.12])
+ by int-mx04.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id o2QA5m7L004056;
+ Fri, 26 Mar 2010 06:05:49 -0400
+Received: from bastion.fedoraproject.org (bastion.phx2.fedoraproject.org [10.5.126.11])
+ by mx1.redhat.com (8.13.8/8.13.8) with ESMTP id o2QA5bS2028477;
+ Fri, 26 Mar 2010 06:05:37 -0400
+Received: from lists.fedoraproject.org (collab1.vpn.fedoraproject.org [192.168.1.21])
+ by bastion02.phx2.fedoraproject.org (Postfix) with ESMTP id 16EF710F96C;
+ Fri, 26 Mar 2010 10:05:37 +0000 (UTC)
+Received: from collab1.fedoraproject.org (localhost.localdomain [127.0.0.1])
+ by lists.fedoraproject.org (Postfix) with ESMTP id 1C8C93267AC;
+ Fri, 26 Mar 2010 10:05:19 +0000 (UTC)
+X-Original-To: kernel@lists.fedoraproject.org
+Delivered-To: kernel@lists.fedoraproject.org
+Received: from smtp-mm1.fedoraproject.org (smtp-mm1.fedoraproject.org
+ [80.239.156.217])
+ by lists.fedoraproject.org (Postfix) with ESMTP id 5FD26326780
+ for <kernel@lists.fedoraproject.org>;
+ Fri, 26 Mar 2010 10:05:14 +0000 (UTC)
+Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28])
+ by smtp-mm1.fedoraproject.org (Postfix) with ESMTP id 9BB6A87E5F
+ for <kernel@lists.fedoraproject.org>;
+ Fri, 26 Mar 2010 10:05:13 +0000 (UTC)
+Received: from int-mx08.intmail.prod.int.phx2.redhat.com
+ (int-mx08.intmail.prod.int.phx2.redhat.com [10.5.11.21])
+ by mx1.redhat.com (8.13.8/8.13.8) with ESMTP id o2QA5CbS005173
+ (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK);
+ Fri, 26 Mar 2010 06:05:12 -0400
+Received: from localhost (dhcp-0-189.brq.redhat.com [10.34.0.189])
+ by int-mx08.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
+ id o2QA5BKo028563; Fri, 26 Mar 2010 06:05:11 -0400
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+To: kernel@lists.fedoraproject.org
+Subject: [PATCH 2/3] iwlwifi: reset card during probe
+Date: Fri, 26 Mar 2010 11:03:26 +0100
+Message-Id: <1269597807-2925-2-git-send-email-sgruszka@redhat.com>
+In-Reply-To: <1269597807-2925-1-git-send-email-sgruszka@redhat.com>
+References: <1269597807-2925-1-git-send-email-sgruszka@redhat.com>
+X-Scanned-By: MIMEDefang 2.67 on 10.5.11.17
+X-Scanned-By: MIMEDefang 2.67 on 10.5.11.17
+X-Scanned-By: MIMEDefang 2.67 on 10.5.110.12
+X-Scanned-By: MIMEDefang 2.67 on 10.5.11.21
+Cc: Stanislaw Gruszka <sgruszka@redhat.com>,
+ "John W. Linville" <linville@tuxdriver.com>
+X-BeenThere: kernel@lists.fedoraproject.org
+X-Mailman-Version: 2.1.9
+Precedence: list
+List-Id: "Fedora kernel development." <kernel.lists.fedoraproject.org>
+List-Unsubscribe: <https://admin.fedoraproject.org/mailman/listinfo/kernel>,
+ <mailto:kernel-request@lists.fedoraproject.org?subject=unsubscribe>
+List-Archive: <http://lists.fedoraproject.org/pipermail/kernel>
+List-Post: <mailto:kernel@lists.fedoraproject.org>
+List-Help: <mailto:kernel-request@lists.fedoraproject.org?subject=help>
+List-Subscribe: <https://admin.fedoraproject.org/mailman/listinfo/kernel>,
+ <mailto:kernel-request@lists.fedoraproject.org?subject=subscribe>
+MIME-Version: 1.0
+Content-Type: text/plain; charset="us-ascii"
+Content-Transfer-Encoding: 7bit
+Sender: kernel-bounces@lists.fedoraproject.org
+Errors-To: kernel-bounces@lists.fedoraproject.org
+X-RedHat-Spam-Score: -0.01 (T_RP_MATCHES_RCVD)
+Resent-From: linville@redhat.com
+Resent-Date: Mon, 29 Mar 2010 14:49:32 -0400
+Resent-To: linville@tuxdriver.com
+X-Spam-Score: -8.8 (--------)
+X-Spam-Status: No
+Content-Length: 2455
+Lines: 61
+
+RHBZ#557084
+
+To ensure that the card is in a sane state during probe, we add a reset call.
+This change was prompted by users of kdump who were not able to bring up the
+wireless driver in the kdump kernel. The problem here was that the primary
+kernel, which is no longer running at that point, left the wireless card up
+and running. When the kdump kernel starts it is thus possible to immediately
+receive interrupts from the firmware after registering the interrupt handler,
+but without yet being ready to deal with them.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+---
+ drivers/net/wireless/iwlwifi/iwl-agn.c | 8 ++++++++
+ drivers/net/wireless/iwlwifi/iwl3945-base.c | 7 +++++++
+ 2 files changed, 15 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 921dc4a..1661f3c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -2976,6 +2976,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ * we should init now
+ */
+ spin_lock_init(&priv->reg_lock);
++
++ /*
++ * stop and reset the on-board processor just in case it is in a
++ * strange state ... like being left stranded by a primary kernel
++ * and this is now the kdump kernel trying to start up
++ */
++ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
++
+ iwl_hw_detect(priv);
+ IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
+ priv->cfg->name, priv->hw_rev);
+diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+index 5f26c93..3726b01 100644
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -4032,6 +4032,13 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
+ IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
+ priv->cfg->name);
+
++ /*
++ * stop and reset the on-board processor just in case it is in a
++ * strange state ... like being left stranded by a primary kernel
++ * and this is now the kdump kernel trying to start up
++ */
++ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
++
+ /***********************
+ * 7. Setup Services
+ * ********************/
+--
+1.6.2.5
+
+_______________________________________________
+kernel mailing list
+kernel@lists.fedoraproject.org
+https://admin.fedoraproject.org/mailman/listinfo/kernel
+
+
diff --git a/freed-ora/current/F-12/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch b/freed-ora/current/F-12/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch
new file mode 100644
index 000000000..f1adf0fd0
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch
@@ -0,0 +1,38 @@
+Back-port of the following upstream commit...
+
+commit 6c3872e1d52290dcd506473028867cacc6b7393d
+Author: Trieu 'Andrew' Nguyen <trieux.t.nguyen@intel.com>
+Date: Mon Feb 8 13:53:05 2010 -0800
+
+ iwlwifi: Adjusting PLCP error threshold for 1000 NIC
+
+ While testing the station with the NIC 1000 family, it was found that
+ the plcp error count can easily exceed 50 in 100 ms. This creates
+ unnecessary radio reset/tuning. This patch raises the PLCP error
+ threshold of the NIC 1000 from an error count of 50 to 200.
+
+ Signed-off-by: Trieu 'Andrew' Nguyen <trieux.t.nguyen@intel.com>
+ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-03-22 14:23:01.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-03-22 15:33:38.000000000 -0400
+@@ -162,6 +162,6 @@ struct iwl_cfg iwl1000_bgn_cfg = {
+ .shadow_ram_support = false,
+ .ht_greenfield_support = true,
+ .use_rts_for_ht = true, /* use rts/cts protection */
+- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+ };
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-03-22 15:24:28.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-03-22 15:33:00.000000000 -0400
+@@ -970,6 +970,7 @@ struct traffic_stats {
+ #define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
+ #define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+ #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
++#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
+ #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+
+ enum iwl_reset {
diff --git a/freed-ora/current/F-12/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch b/freed-ora/current/F-12/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch
new file mode 100644
index 000000000..e9f16239e
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch
@@ -0,0 +1,82 @@
+Back-port of the following upstream commit...
+
+commit d4d59e88cb746165c6fe33eacb6f582d525c6ef1
+Author: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Fri Jan 22 14:22:45 2010 -0800
+
+ iwlwifi: Logic to control how frequent radio should be reset if needed
+
+ Add additional logic to the internal scan routine to control how
+ frequently this function should be performed.
+
+ The intent of this function is to reset/re-tune the radio and bring the
+ RF/PHY back to a normal state, so it does not make sense to call it too
+ frequently;
+ if resetting the radio cannot bring it back to a normal state, that
+ indicates there is some other reason the radio is not operating correctly.
+
+ Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+ Signed-off-by: John W. Linville <linville@tuxdriver.com>
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-03-22 11:26:18.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-03-22 14:12:32.000000000 -0400
+@@ -1013,6 +1013,7 @@ struct iwl_priv {
+ unsigned long scan_start;
+ unsigned long scan_pass_start;
+ unsigned long scan_start_tsf;
++ unsigned long last_internal_scan_jiffies;
+ void *scan;
+ int scan_bands;
+ struct cfg80211_scan_request *scan_request;
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig 2010-03-22 11:26:18.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c 2010-03-22 14:15:28.000000000 -0400
+@@ -206,7 +206,8 @@ static void iwl_rx_scan_results_notif(st
+ #endif
+
+ priv->last_scan_jiffies = jiffies;
+- priv->next_scan_jiffies = 0;
++ if (!priv->is_internal_short_scan)
++ priv->next_scan_jiffies = 0;
+ }
+
+ /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
+@@ -252,8 +253,11 @@ static void iwl_rx_scan_complete_notif(s
+ goto reschedule;
+ }
+
+- priv->last_scan_jiffies = jiffies;
+- priv->next_scan_jiffies = 0;
++ if (!priv->is_internal_short_scan)
++ priv->next_scan_jiffies = 0;
++ else
++ priv->last_internal_scan_jiffies = jiffies;
++
+ IWL_DEBUG_INFO(priv, "Setting scan to off\n");
+
+ clear_bit(STATUS_SCANNING, &priv->status);
+@@ -560,6 +564,8 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
+ * internal short scan, this function should only been called while associated.
+ * It will reset and tune the radio to prevent possible RF related problem
+ */
++#define IWL_DELAY_NEXT_INTERNAL_SCAN (HZ*1)
++
+ int iwl_internal_short_hw_scan(struct iwl_priv *priv)
+ {
+ int ret = 0;
+@@ -579,6 +585,13 @@ int iwl_internal_short_hw_scan(struct iw
+ ret = -EAGAIN;
+ goto out;
+ }
++ if (priv->last_internal_scan_jiffies &&
++ time_after(priv->last_internal_scan_jiffies +
++ IWL_DELAY_NEXT_INTERNAL_SCAN, jiffies)) {
++ IWL_DEBUG_SCAN(priv, "internal scan rejected\n");
++ goto out;
++ }
++
+ priv->scan_bands = 0;
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
diff --git a/freed-ora/current/F-12/iwlwifi_-Recover-TX-flow-failure.patch b/freed-ora/current/F-12/iwlwifi_-Recover-TX-flow-failure.patch
new file mode 100644
index 000000000..52bdd9fb4
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-Recover-TX-flow-failure.patch
@@ -0,0 +1,137 @@
+This patch is not upstream yet...
+
+From 34c75818bfcd65e54fed9fe852fc41aba8cf233d Mon Sep 17 00:00:00 2001
+From: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Thu, 4 Mar 2010 13:38:59 -0800
+Subject: [PATCH 15/17] iwlwifi: Recover TX flow failure
+
+Monitors the tx statistics to detect the drop in throughput.
+When the throughput drops, the ratio of the actual_ack_count and the
+expected_ack_count also drops. At the same time, the aggregated
+ba_timeout (the number of ba timeout retries) also rises. If the
+actual_ack_count/expected_ack_count ratio is 0 and the number of ba
+timeout retries rises to BA_TIMEOUT_MAX, no tx packets can be delivered.
+Reload the uCode to bring the system back to a normal operational
+state.
+
+Signed-off-by: Trieu 'Andrew' Nguyen <trieux.t.nguyen@intel.com>
+Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig 2010-03-22 15:48:54.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c 2010-03-22 16:06:01.000000000 -0400
+@@ -2559,10 +2559,21 @@ static int iwl_mac_ampdu_action(struct i
+ return ret;
+ case IEEE80211_AMPDU_TX_START:
+ IWL_DEBUG_HT(priv, "start Tx\n");
+- return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
++ ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn);
++ if (ret == 0) {
++ priv->_agn.agg_tids_count++;
++ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
++ priv->_agn.agg_tids_count);
++ }
++ return ret;
+ case IEEE80211_AMPDU_TX_STOP:
+ IWL_DEBUG_HT(priv, "stop Tx\n");
+ ret = iwl_tx_agg_stop(priv, sta->addr, tid);
++ if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
++ priv->_agn.agg_tids_count--;
++ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
++ priv->_agn.agg_tids_count);
++ }
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return 0;
+ else
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-03-22 16:08:56.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-03-22 16:08:53.000000000 -0400
+@@ -1494,6 +1494,7 @@ int iwl_init_drv(struct iwl_priv *priv)
+ priv->band = IEEE80211_BAND_2GHZ;
+
+ priv->iw_mode = NL80211_IFTYPE_STATION;
++ priv->_agn.agg_tids_count = 0;
+
+ priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-03-22 15:48:54.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-03-22 16:06:01.000000000 -0400
+@@ -1206,6 +1206,16 @@ struct iwl_priv {
+ u16 beacon_int;
+ struct ieee80211_vif *vif;
+
++ union {
++ struct {
++ /*
++ * reporting the number of tids has AGG on. 0 means
++ * no AGGREGATION
++ */
++ u8 agg_tids_count;
++ } _agn;
++ };
++
+ /*Added for 3945 */
+ void *shared_virt;
+ dma_addr_t shared_phys;
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-03-22 16:02:35.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-03-22 16:06:01.000000000 -0400
+@@ -550,9 +550,18 @@ static void iwl_rx_calc_noise(struct iwl
+
+ #define REG_RECALIB_PERIOD (60)
+
++/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
++#define ACK_CNT_RATIO (50)
++#define BA_TIMEOUT_CNT (5)
++#define BA_TIMEOUT_MAX (16)
++
+ #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
+ /*
+- * This function checks for plcp error.
++ * This function checks for plcp error, ACK count ratios, aggregated BA
++ * timeout retries.
++ * - When the ACK count ratio is 0 and aggregated BA timeout retries is
++ * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting
++ * the firmware.
+ * - When the plcp error is exceeding the thresholds, it will reset the radio
+ * to improve the throughput.
+ */
+@@ -562,6 +571,36 @@ void iwl_recover_from_statistics(struct
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
++ int actual_ack_cnt_delta;
++ int expected_ack_cnt_delta;
++ int ba_timeout_delta;
++
++ actual_ack_cnt_delta =
++ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
++ le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
++ expected_ack_cnt_delta =
++ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
++ le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
++ ba_timeout_delta =
++ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
++ le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
++ if ((priv->_agn.agg_tids_count > 0) &&
++ (expected_ack_cnt_delta > 0) &&
++ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
++ < ACK_CNT_RATIO) &&
++ (ba_timeout_delta > BA_TIMEOUT_CNT)) {
++ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
++ " expected_ack_cnt = %d\n",
++ actual_ack_cnt_delta, expected_ack_cnt_delta);
++ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
++ ba_timeout_delta);
++ if ((actual_ack_cnt_delta == 0) &&
++ (ba_timeout_delta >= BA_TIMEOUT_MAX)) {
++ IWL_DEBUG_RADIO(priv,
++ "call iwl_force_reset(IWL_FW_RESET)\n");
++ iwl_force_reset(priv, IWL_FW_RESET);
++ }
++ }
+
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
diff --git a/freed-ora/current/F-12/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch b/freed-ora/current/F-12/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch
new file mode 100644
index 000000000..f83e12abc
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch
@@ -0,0 +1,374 @@
+Back-port of the following upstream commit...
+
+commit afbdd69af0e6a0c40676d4d4b94a0a4414708eaa
+Author: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Fri Jan 22 14:22:43 2010 -0800
+
+ iwlwifi: add function to reset/tune radio if needed
+
+ Adding "radio reset" function to help reset and stabilize the radio.
+
+ During normal operation, the radio sometimes encounters a problem for an
+ unknown reason and cannot recover by itself; the best way to
+ recover from it is to reset and re-tune the radio. Currently, there is
+ no RF reset command available, but since the radio gets reset when
+ switching channels, use an internal hw scan request to force a radio
+ reset and get back to a normal operational state.
+
+ The internal hw scan will only perform a passive scan on the first
+ available channel (not the channel in use) while in the associated state.
+ The request is ignored if a scan is already in progress or the STA is
+ not associated.
+
+ Also include an "internal_scan" debugfs file to help trigger the
+ internal scan from user mode.
+
+ Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+ Signed-off-by: John W. Linville <linville@tuxdriver.com>
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-03-22 10:23:59.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-03-22 11:26:18.000000000 -0400
+@@ -3035,6 +3035,30 @@ void iwl_update_stats(struct iwl_priv *p
+ EXPORT_SYMBOL(iwl_update_stats);
+ #endif
+
++void iwl_force_rf_reset(struct iwl_priv *priv)
++{
++ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
++ return;
++
++ if (!iwl_is_associated(priv)) {
++ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
++ return;
++ }
++ /*
++ * There is no easy and better way to force reset the radio,
++ * the only known method is switching channel which will force to
++ * reset and tune the radio.
++ * Use internal short scan (single channel) operation to should
++ * achieve this objective.
++ * Driver should reset the radio when number of consecutive missed
++ * beacon, or any other uCode error condition detected.
++ */
++ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
++ iwl_internal_short_hw_scan(priv);
++ return;
++}
++EXPORT_SYMBOL(iwl_force_rf_reset);
++
+ #ifdef CONFIG_PM
+
+ int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-03-22 10:23:59.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-03-22 11:26:18.000000000 -0400
+@@ -461,6 +461,8 @@ void iwl_init_scan_params(struct iwl_pri
+ int iwl_scan_cancel(struct iwl_priv *priv);
+ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+ int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
++int iwl_internal_short_hw_scan(struct iwl_priv *priv);
++void iwl_force_rf_reset(struct iwl_priv *priv);
+ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+ const u8 *ie, int ie_len, int left);
+ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2010-03-22 11:33:02.000000000 -0400
+@@ -1614,6 +1614,27 @@ static ssize_t iwl_dbgfs_tx_power_read(s
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ }
+
++static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct iwl_priv *priv = file->private_data;
++ char buf[8];
++ int buf_size;
++ int scan;
++
++ memset(buf, 0, sizeof(buf));
++ buf_size = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, buf_size))
++ return -EFAULT;
++ if (sscanf(buf, "%d", &scan) != 1)
++ return -EINVAL;
++
++ iwl_internal_short_hw_scan(priv);
++
++ return count;
++}
++
+ DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics);
+ DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics);
+ DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+@@ -1625,6 +1646,7 @@ DEBUGFS_READ_FILE_OPS(ucode_general_stat
+ DEBUGFS_READ_FILE_OPS(sensitivity);
+ DEBUGFS_READ_FILE_OPS(chain_noise);
+ DEBUGFS_READ_FILE_OPS(tx_power);
++DEBUGFS_WRITE_FILE_OPS(internal_scan);
+
+ /*
+ * Create the debugfs files and directories
+@@ -1674,6 +1696,7 @@ int iwl_dbgfs_register(struct iwl_priv *
+ DEBUGFS_ADD_FILE(rx_queue, debug);
+ DEBUGFS_ADD_FILE(tx_queue, debug);
+ DEBUGFS_ADD_FILE(tx_power, debug);
++ DEBUGFS_ADD_FILE(internal_scan, debug);
+ if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
+ DEBUGFS_ADD_FILE(ucode_rx_stats, debug);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, debug);
+@@ -1728,6 +1751,7 @@ void iwl_dbgfs_unregister(struct iwl_pri
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
++ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_internal_scan);
+ if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
+ file_ucode_rx_stats);
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-03-22 11:27:31.000000000 -0400
+@@ -108,6 +108,7 @@ struct iwl_debugfs {
+ struct dentry *file_sensitivity;
+ struct dentry *file_chain_noise;
+ struct dentry *file_tx_power;
++ struct dentry *file_internal_scan;
+ } dbgfs_debug_files;
+ u32 sram_offset;
+ u32 sram_len;
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-03-22 10:23:59.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-03-22 11:26:18.000000000 -0400
+@@ -1016,6 +1016,7 @@ struct iwl_priv {
+ void *scan;
+ int scan_bands;
+ struct cfg80211_scan_request *scan_request;
++ bool is_internal_short_scan;
+ u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 mgmt_tx_ant;
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c 2010-03-22 11:26:18.000000000 -0400
+@@ -316,6 +316,72 @@ u16 iwl_get_passive_dwell_time(struct iw
+ }
+ EXPORT_SYMBOL(iwl_get_passive_dwell_time);
+
++static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
++ enum ieee80211_band band,
++ struct iwl_scan_channel *scan_ch)
++{
++ const struct ieee80211_supported_band *sband;
++ const struct iwl_channel_info *ch_info;
++ u16 passive_dwell = 0;
++ u16 active_dwell = 0;
++ int i, added = 0;
++ u16 channel = 0;
++
++ sband = iwl_get_hw_mode(priv, band);
++ if (!sband) {
++ IWL_ERR(priv, "invalid band\n");
++ return added;
++ }
++
++ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
++ passive_dwell = iwl_get_passive_dwell_time(priv, band);
++
++ if (passive_dwell <= active_dwell)
++ passive_dwell = active_dwell + 1;
++
++ /* only scan single channel, good enough to reset the RF */
++ /* pick the first valid not in-use channel */
++ if (band == IEEE80211_BAND_5GHZ) {
++ for (i = 14; i < priv->channel_count; i++) {
++ if (priv->channel_info[i].channel !=
++ le16_to_cpu(priv->staging_rxon.channel)) {
++ channel = priv->channel_info[i].channel;
++ ch_info = iwl_get_channel_info(priv,
++ band, channel);
++ if (is_channel_valid(ch_info))
++ break;
++ }
++ }
++ } else {
++ for (i = 0; i < 14; i++) {
++ if (priv->channel_info[i].channel !=
++ le16_to_cpu(priv->staging_rxon.channel)) {
++ channel =
++ priv->channel_info[i].channel;
++ ch_info = iwl_get_channel_info(priv,
++ band, channel);
++ if (is_channel_valid(ch_info))
++ break;
++ }
++ }
++ }
++ if (channel) {
++ scan_ch->channel = cpu_to_le16(channel);
++ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
++ scan_ch->active_dwell = cpu_to_le16(active_dwell);
++ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
++ /* Set txpower levels to defaults */
++ scan_ch->dsp_atten = 110;
++ if (band == IEEE80211_BAND_5GHZ)
++ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
++ else
++ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
++ added++;
++ } else
++ IWL_ERR(priv, "no valid channel found\n");
++ return added;
++}
++
+ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+@@ -422,6 +488,7 @@ static int iwl_scan_initiate(struct iwl_
+
+ IWL_DEBUG_INFO(priv, "Starting scan...\n");
+ set_bit(STATUS_SCANNING, &priv->status);
++ priv->is_internal_short_scan = false;
+ priv->scan_start = jiffies;
+ priv->scan_pass_start = priv->scan_start;
+
+@@ -489,6 +556,45 @@ out_unlock:
+ }
+ EXPORT_SYMBOL(iwl_mac_hw_scan);
+
++/*
++ * internal short scan, this function should only been called while associated.
++ * It will reset and tune the radio to prevent possible RF related problem
++ */
++int iwl_internal_short_hw_scan(struct iwl_priv *priv)
++{
++ int ret = 0;
++
++ if (!iwl_is_ready_rf(priv)) {
++ ret = -EIO;
++ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
++ goto out;
++ }
++ if (test_bit(STATUS_SCANNING, &priv->status)) {
++ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
++ ret = -EAGAIN;
++ goto out;
++ }
++ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
++ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
++ ret = -EAGAIN;
++ goto out;
++ }
++ priv->scan_bands = 0;
++ if (priv->band == IEEE80211_BAND_5GHZ)
++ priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
++ else
++ priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
++
++ IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
++ set_bit(STATUS_SCANNING, &priv->status);
++ priv->is_internal_short_scan = true;
++ queue_work(priv->workqueue, &priv->request_scan);
++
++out:
++ return ret;
++}
++EXPORT_SYMBOL(iwl_internal_short_hw_scan);
++
+ #define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
+
+ void iwl_bg_scan_check(struct work_struct *data)
+@@ -552,7 +658,8 @@ u16 iwl_fill_probe_req(struct iwl_priv *
+ if (WARN_ON(left < ie_len))
+ return len;
+
+- memcpy(pos, ies, ie_len);
++ if (ies)
++ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ left -= ie_len;
+
+@@ -654,7 +761,6 @@ static void iwl_bg_request_scan(struct w
+ unsigned long flags;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+-
+ spin_lock_irqsave(&priv->lock, flags);
+ interval = priv->beacon_int;
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -672,7 +778,9 @@ static void iwl_bg_request_scan(struct w
+ scan_suspend_time, interval);
+ }
+
+- if (priv->scan_request->n_ssids) {
++ if (priv->is_internal_short_scan) {
++ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
++ } else if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+@@ -740,24 +848,38 @@ static void iwl_bg_request_scan(struct w
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+- cmd_len = iwl_fill_probe_req(priv,
+- (struct ieee80211_mgmt *)scan->data,
+- priv->scan_request->ie,
+- priv->scan_request->ie_len,
+- IWL_MAX_SCAN_SIZE - sizeof(*scan));
++ if (!priv->is_internal_short_scan) {
++ cmd_len = iwl_fill_probe_req(priv,
++ (struct ieee80211_mgmt *)scan->data,
++ priv->scan_request->ie,
++ priv->scan_request->ie_len,
++ IWL_MAX_SCAN_SIZE - sizeof(*scan));
++ } else {
++ cmd_len = iwl_fill_probe_req(priv,
++ (struct ieee80211_mgmt *)scan->data,
++ NULL, 0,
++ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
++ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+-
+ if (iwl_is_monitor_mode(priv))
+ scan->filter_flags = RXON_FILTER_PROMISC_MSK;
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+- scan->channel_count =
+- iwl_get_channels_for_scan(priv, band, is_active, n_probes,
+- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
+-
++ if (priv->is_internal_short_scan) {
++ scan->channel_count =
++ iwl_get_single_channel_for_scan(priv, band,
++ (void *)&scan->data[le16_to_cpu(
++ scan->tx_cmd.len)]);
++ } else {
++ scan->channel_count =
++ iwl_get_channels_for_scan(priv, band,
++ is_active, n_probes,
++ (void *)&scan->data[le16_to_cpu(
++ scan->tx_cmd.len)]);
++ }
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ goto done;
+@@ -818,7 +940,12 @@ void iwl_bg_scan_completed(struct work_s
+
+ cancel_delayed_work(&priv->scan_check);
+
+- ieee80211_scan_completed(priv->hw, false);
++ if (!priv->is_internal_short_scan)
++ ieee80211_scan_completed(priv->hw, false);
++ else {
++ priv->is_internal_short_scan = false;
++ IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
++ }
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
diff --git a/freed-ora/current/F-12/iwlwifi_-add-internal-short-scan-support-for-3945.patch b/freed-ora/current/F-12/iwlwifi_-add-internal-short-scan-support-for-3945.patch
new file mode 100644
index 000000000..db132ee7b
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-add-internal-short-scan-support-for-3945.patch
@@ -0,0 +1,85 @@
+commit 4f4d4088b05155d4904e29d5c00316395ce32f27
+Author: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Wed Feb 24 08:28:30 2010 -0800
+
+ iwlwifi: add internal short scan support for 3945
+
+ Add internal short scan support for the 3945 NIC. This allows the 3945
+ NIC to support radio reset requests like the other series of NICs.
+
+ Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+index dd33251..252df12 100644
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -2799,7 +2799,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
+ .len = sizeof(struct iwl3945_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+- int rc = 0;
+ struct iwl3945_scan_cmd *scan;
+ struct ieee80211_conf *conf = NULL;
+ u8 n_probes = 0;
+@@ -2827,7 +2826,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
+ "Ignoring second request.\n");
+- rc = -EIO;
+ goto done;
+ }
+
+@@ -2862,7 +2860,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
+ priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan) {
+- rc = -ENOMEM;
++ IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
+ goto done;
+ }
+ }
+@@ -2905,7 +2903,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
+ scan_suspend_time, interval);
+ }
+
+- if (priv->scan_request->n_ssids) {
++ if (priv->is_internal_short_scan) {
++ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
++ } else if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+@@ -2952,13 +2952,20 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
+ goto done;
+ }
+
+- scan->tx_cmd.len = cpu_to_le16(
++ if (!priv->is_internal_short_scan) {
++ scan->tx_cmd.len = cpu_to_le16(
+ iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan)));
+-
++ } else {
++ scan->tx_cmd.len = cpu_to_le16(
++ iwl_fill_probe_req(priv,
++ (struct ieee80211_mgmt *)scan->data,
++ NULL, 0,
++ IWL_MAX_SCAN_SIZE - sizeof(*scan)));
++ }
+ /* select Rx antennas */
+ scan->flags |= iwl3945_get_antenna_flags(priv);
+
+@@ -2980,8 +2987,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(STATUS_SCAN_HW, &priv->status);
+- rc = iwl_send_cmd_sync(priv, &cmd);
+- if (rc)
++ if (iwl_send_cmd_sync(priv, &cmd))
+ goto done;
+
+ queue_delayed_work(priv->workqueue, &priv->scan_check,
diff --git a/freed-ora/current/F-12/iwlwifi_-code-cleanup-for-connectivity-recovery.patch b/freed-ora/current/F-12/iwlwifi_-code-cleanup-for-connectivity-recovery.patch
new file mode 100644
index 000000000..587c5ff3c
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-code-cleanup-for-connectivity-recovery.patch
@@ -0,0 +1,230 @@
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-03-22 16:42:34.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-03-22 16:42:57.000000000 -0400
+@@ -136,7 +136,8 @@ static struct iwl_lib_ops iwl1000_lib =
+ .set_ct_kill = iwl1000_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+- .recover_from_statistics = iwl_recover_from_statistics,
++ .check_plcp_health = iwl_good_plcp_health,
++ .check_ack_health = iwl_good_ack_health,
+ };
+
+ static struct iwl_ops iwl1000_ops = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig 2010-03-22 16:42:34.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c 2010-03-22 16:42:57.000000000 -0400
+@@ -2340,7 +2340,7 @@ static struct iwl_lib_ops iwl4965_lib =
+ .temperature = iwl4965_temperature_calib,
+ .set_ct_kill = iwl4965_set_ct_threshold,
+ },
+- .recover_from_statistics = iwl_recover_from_statistics,
++ .check_plcp_health = iwl_good_plcp_health,
+ };
+
+ static struct iwl_ops iwl4965_ops = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig 2010-03-22 16:42:34.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c 2010-03-22 16:42:57.000000000 -0400
+@@ -1580,7 +1580,8 @@ struct iwl_lib_ops iwl5000_lib = {
+ .set_ct_kill = iwl5000_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+- .recover_from_statistics = iwl_recover_from_statistics,
++ .check_plcp_health = iwl_good_plcp_health,
++ .check_ack_health = iwl_good_ack_health,
+ };
+
+ static struct iwl_lib_ops iwl5150_lib = {
+@@ -1634,7 +1635,8 @@ static struct iwl_lib_ops iwl5150_lib =
+ .set_ct_kill = iwl5150_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+- .recover_from_statistics = iwl_recover_from_statistics,
++ .check_plcp_health = iwl_good_plcp_health,
++ .check_ack_health = iwl_good_ack_health,
+ };
+
+ struct iwl_ops iwl5000_ops = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig 2010-03-22 16:42:34.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c 2010-03-22 16:42:57.000000000 -0400
+@@ -138,7 +138,8 @@ static struct iwl_lib_ops iwl6000_lib =
+ .set_ct_kill = iwl6000_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+- .recover_from_statistics = iwl_recover_from_statistics,
++ .check_plcp_health = iwl_good_plcp_health,
++ .check_ack_health = iwl_good_ack_health,
+ };
+
+ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-03-22 16:42:34.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-03-22 16:42:57.000000000 -0400
+@@ -185,8 +185,11 @@ struct iwl_lib_ops {
+ struct iwl_temp_ops temp_ops;
+ /* recover from tx queue stall */
+ void (*recover_from_tx_stall)(unsigned long data);
+- /* recover from errors showed in statistics */
+- void (*recover_from_statistics)(struct iwl_priv *priv,
++ /* check for plcp health */
++ bool (*check_plcp_health)(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt);
++ /* check for ack health */
++ bool (*check_ack_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ };
+
+@@ -401,7 +404,9 @@ int iwl_tx_queue_reclaim(struct iwl_priv
+ /* Handlers */
+ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+-void iwl_recover_from_statistics(struct iwl_priv *priv,
++bool iwl_good_plcp_health(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt);
++bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-03-22 16:42:34.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-03-22 16:45:03.000000000 -0400
+@@ -555,24 +555,18 @@ static void iwl_rx_calc_noise(struct iwl
+ #define BA_TIMEOUT_CNT (5)
+ #define BA_TIMEOUT_MAX (16)
+
+-#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
+-/*
+- * This function checks for plcp error, ACK count ratios, aggregated BA
+- * timeout retries.
+- * - When the ACK count ratio is 0 and aggregated BA timeout retries is
+- * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting
+- * the firmware.
+- * - When the plcp error is exceeding the thresholds, it will reset the radio
+- * to improve the throughput.
++/**
++ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
++ *
++ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
++ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
++ * operation state.
+ */
+-void iwl_recover_from_statistics(struct iwl_priv *priv,
+- struct iwl_rx_packet *pkt)
++bool iwl_good_ack_health(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt)
+ {
+- int combined_plcp_delta;
+- unsigned int plcp_msec;
+- unsigned long plcp_received_jiffies;
+- int actual_ack_cnt_delta;
+- int expected_ack_cnt_delta;
++ bool rc = true;
++ int actual_ack_cnt_delta, expected_ack_cnt_delta;
+ int ba_timeout_delta;
+
+ actual_ack_cnt_delta =
+@@ -594,13 +588,27 @@ void iwl_recover_from_statistics(struct
+ actual_ack_cnt_delta, expected_ack_cnt_delta);
+ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
+ ba_timeout_delta);
+- if ((actual_ack_cnt_delta == 0) &&
+- (ba_timeout_delta >= BA_TIMEOUT_MAX)) {
+- IWL_DEBUG_RADIO(priv,
+- "call iwl_force_reset(IWL_FW_RESET)\n");
+- iwl_force_reset(priv, IWL_FW_RESET);
+- }
++ if (!actual_ack_cnt_delta &&
++ (ba_timeout_delta >= BA_TIMEOUT_MAX))
++ rc = false;
+ }
++ return rc;
++}
++EXPORT_SYMBOL(iwl_good_ack_health);
++
++/**
++ * iwl_good_plcp_health - checks for plcp error.
++ *
++ * When the plcp error is exceeding the thresholds, reset the radio
++ * to improve the throughput.
++ */
++bool iwl_good_plcp_health(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt)
++{
++ bool rc = true;
++ int combined_plcp_delta;
++ unsigned int plcp_msec;
++ unsigned long plcp_received_jiffies;
+
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+@@ -635,7 +643,8 @@ void iwl_recover_from_statistics(struct
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+- IWL_DEBUG_RADIO(priv, PLCP_MSG,
++ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
++ "%u, %u, %u, %u, %d, %u mSecs\n",
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
+@@ -643,15 +652,42 @@ void iwl_recover_from_statistics(struct
+ le32_to_cpu(
+ priv->statistics.rx.ofdm_ht.plcp_err),
+ combined_plcp_delta, plcp_msec);
+- /*
+- * Reset the RF radio due to the high plcp
+- * error rate
+- */
+- iwl_force_reset(priv, IWL_RF_RESET);
++ rc = false;
++ }
++ }
++ return rc;
++}
++EXPORT_SYMBOL(iwl_good_plcp_health);
++
++static void iwl_recover_from_statistics(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt)
++{
++ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
++ return;
++ if (iwl_is_associated(priv)) {
++ if (priv->cfg->ops->lib->check_ack_health) {
++ if (!priv->cfg->ops->lib->check_ack_health(
++ priv, pkt)) {
++ /*
++ * low ack count detected
++ * restart Firmware
++ */
++ IWL_ERR(priv, "low ack count detected, "
++ "restart firmware\n");
++ iwl_force_reset(priv, IWL_FW_RESET);
++ }
++ } else if (priv->cfg->ops->lib->check_plcp_health) {
++ if (!priv->cfg->ops->lib->check_plcp_health(
++ priv, pkt)) {
++ /*
++ * high plcp error detected
++ * reset Radio
++ */
++ iwl_force_reset(priv, IWL_RF_RESET);
++ }
+ }
+ }
+ }
+-EXPORT_SYMBOL(iwl_recover_from_statistics);
+
+ void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+@@ -670,8 +706,7 @@ void iwl_rx_statistics(struct iwl_priv *
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+- if (priv->cfg->ops->lib->recover_from_statistics)
+- priv->cfg->ops->lib->recover_from_statistics(priv, pkt);
++ iwl_recover_from_statistics(priv, pkt);
+
+ memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
+
diff --git a/freed-ora/current/F-12/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch b/freed-ora/current/F-12/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch
new file mode 100644
index 000000000..60037acae
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch
@@ -0,0 +1,146 @@
+From 2386b8d18106262e27c9ca1a674a1018af29bdde Mon Sep 17 00:00:00 2001
+From: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Tue, 16 Mar 2010 10:46:31 -0700
+Subject: [PATCH 17/17] iwlwifi: iwl_good_ack_health() only apply to AGN device
+
+iwl_good_ack_health() check for expected and actual ack count which only
+apply to aggregation mode. Move the function to iwlagn module.
+
+Reported-by: Chantry Xavier <chantry.xavier@gmail.com>
+Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+---
+ drivers/net/wireless/iwlwifi/iwl-agn.c | 45 ++++++++++++++++++++++++++++++
+ drivers/net/wireless/iwlwifi/iwl-core.h | 2 +
+ drivers/net/wireless/iwlwifi/iwl-rx.c | 46 -------------------------------
+ 3 files changed, 47 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index c22db6c..d57f215 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -1316,6 +1316,51 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
+ iwl_enable_interrupts(priv);
+ }
+
++/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
++#define ACK_CNT_RATIO (50)
++#define BA_TIMEOUT_CNT (5)
++#define BA_TIMEOUT_MAX (16)
++
++/**
++ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
++ *
++ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
++ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
++ * operation state.
++ */
++bool iwl_good_ack_health(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt)
++{
++ bool rc = true;
++ int actual_ack_cnt_delta, expected_ack_cnt_delta;
++ int ba_timeout_delta;
++
++ actual_ack_cnt_delta =
++ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
++ le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
++ expected_ack_cnt_delta =
++ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
++ le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
++ ba_timeout_delta =
++ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
++ le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
++ if ((priv->_agn.agg_tids_count > 0) &&
++ (expected_ack_cnt_delta > 0) &&
++ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
++ < ACK_CNT_RATIO) &&
++ (ba_timeout_delta > BA_TIMEOUT_CNT)) {
++ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
++ " expected_ack_cnt = %d\n",
++ actual_ack_cnt_delta, expected_ack_cnt_delta);
++ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
++ ba_timeout_delta);
++ if (!actual_ack_cnt_delta &&
++ (ba_timeout_delta >= BA_TIMEOUT_MAX))
++ rc = false;
++ }
++ return rc;
++}
++
+
+ /******************************************************************************
+ *
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
+index 8bf0c39..ca4a516 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.h
++++ b/drivers/net/wireless/iwlwifi/iwl-core.h
+@@ -584,6 +584,8 @@ void iwl_disable_ict(struct iwl_priv *priv);
+ int iwl_alloc_isr_ict(struct iwl_priv *priv);
+ void iwl_free_isr_ict(struct iwl_priv *priv);
+ irqreturn_t iwl_isr_ict(int irq, void *data);
++bool iwl_good_ack_health(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt);
+
+ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
+ {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
+index 1b2a3fc..054d169 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
+@@ -592,52 +592,6 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
+
+ #define REG_RECALIB_PERIOD (60)
+
+-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+-#define ACK_CNT_RATIO (50)
+-#define BA_TIMEOUT_CNT (5)
+-#define BA_TIMEOUT_MAX (16)
+-
+-/**
+- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+- *
+- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
+- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
+- * operation state.
+- */
+-bool iwl_good_ack_health(struct iwl_priv *priv,
+- struct iwl_rx_packet *pkt)
+-{
+- bool rc = true;
+- int actual_ack_cnt_delta, expected_ack_cnt_delta;
+- int ba_timeout_delta;
+-
+- actual_ack_cnt_delta =
+- le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
+- le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
+- expected_ack_cnt_delta =
+- le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
+- le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
+- ba_timeout_delta =
+- le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
+- le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
+- if ((priv->_agn.agg_tids_count > 0) &&
+- (expected_ack_cnt_delta > 0) &&
+- (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
+- < ACK_CNT_RATIO) &&
+- (ba_timeout_delta > BA_TIMEOUT_CNT)) {
+- IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
+- " expected_ack_cnt = %d\n",
+- actual_ack_cnt_delta, expected_ack_cnt_delta);
+- IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
+- ba_timeout_delta);
+- if (!actual_ack_cnt_delta &&
+- (ba_timeout_delta >= BA_TIMEOUT_MAX))
+- rc = false;
+- }
+- return rc;
+-}
+-EXPORT_SYMBOL(iwl_good_ack_health);
+-
+ /**
+ * iwl_good_plcp_health - checks for plcp error.
+ *
+--
+1.6.3.3
+
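The decision in iwl_good_ack_health() above combines three deltas from the statistics notification: actual ACKs, expected ACKs, and aggregated BA timeouts. A standalone sketch (not part of the patch) with the same thresholds as the patch (ACK_CNT_RATIO 50, BA_TIMEOUT_CNT 5, BA_TIMEOUT_MAX 16); the structure access and debug logging are omitted:

#include <stdbool.h>

#define ACK_CNT_RATIO   50
#define BA_TIMEOUT_CNT   5
#define BA_TIMEOUT_MAX  16

/* Returns false when the caller should reload the firmware. */
static bool ack_healthy(int agg_tids, int actual_acks, int expected_acks,
                        int ba_timeouts)
{
        if (agg_tids > 0 && expected_acks > 0 &&
            (actual_acks * 100) / expected_acks < ACK_CNT_RATIO &&
            ba_timeouts > BA_TIMEOUT_CNT) {
                /* e.g. 0 ACKs against 40 expected with 16 BA timeouts:
                 * the ratio is 0% and the timeout count reached the
                 * maximum, so the connection is declared unhealthy */
                if (actual_acks == 0 && ba_timeouts >= BA_TIMEOUT_MAX)
                        return false;
        }
        return true;
}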
diff --git a/freed-ora/current/F-12/iwlwifi_-move-plcp-check-to-separated-function.patch b/freed-ora/current/F-12/iwlwifi_-move-plcp-check-to-separated-function.patch
new file mode 100644
index 000000000..75d8a4df5
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-move-plcp-check-to-separated-function.patch
@@ -0,0 +1,179 @@
+This patch is not upstream yet...
+
+From 171e0b730fd471b8df0d138daf382b8f6835fb18 Mon Sep 17 00:00:00 2001
+From: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Thu, 4 Mar 2010 13:38:58 -0800
+Subject: [PATCH 14/17] iwlwifi: move plcp check to separated function
+
+Move the plcp error checking into a stand-alone function referenced through
+an ops pointer, to accommodate devices that do not need this recovery.
+
+Signed-off-by: Trieu 'Andrew' Nguyen <trieux.t.nguyen@intel.com>
+Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-03-22 15:48:54.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-03-22 15:56:08.000000000 -0400
+@@ -136,6 +136,7 @@ static struct iwl_lib_ops iwl1000_lib =
+ .set_ct_kill = iwl1000_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
++ .recover_from_statistics = iwl_recover_from_statistics,
+ };
+
+ static struct iwl_ops iwl1000_ops = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig 2010-03-22 15:48:54.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c 2010-03-22 15:56:08.000000000 -0400
+@@ -2340,6 +2340,7 @@ static struct iwl_lib_ops iwl4965_lib =
+ .temperature = iwl4965_temperature_calib,
+ .set_ct_kill = iwl4965_set_ct_threshold,
+ },
++ .recover_from_statistics = iwl_recover_from_statistics,
+ };
+
+ static struct iwl_ops iwl4965_ops = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig 2010-03-22 15:48:54.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c 2010-03-22 15:56:08.000000000 -0400
+@@ -1580,6 +1580,7 @@ struct iwl_lib_ops iwl5000_lib = {
+ .set_ct_kill = iwl5000_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
++ .recover_from_statistics = iwl_recover_from_statistics,
+ };
+
+ static struct iwl_lib_ops iwl5150_lib = {
+@@ -1633,6 +1634,7 @@ static struct iwl_lib_ops iwl5150_lib =
+ .set_ct_kill = iwl5150_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
++ .recover_from_statistics = iwl_recover_from_statistics,
+ };
+
+ struct iwl_ops iwl5000_ops = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig 2010-03-22 15:51:12.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c 2010-03-22 15:56:08.000000000 -0400
+@@ -138,6 +138,7 @@ static struct iwl_lib_ops iwl6000_lib =
+ .set_ct_kill = iwl6000_set_ct_threshold,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
++ .recover_from_statistics = iwl_recover_from_statistics,
+ };
+
+ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-03-22 15:48:54.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-03-22 15:56:08.000000000 -0400
+@@ -185,6 +185,9 @@ struct iwl_lib_ops {
+ struct iwl_temp_ops temp_ops;
+ /* recover from tx queue stall */
+ void (*recover_from_tx_stall)(unsigned long data);
++ /* recover from errors showed in statistics */
++ void (*recover_from_statistics)(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt);
+ };
+
+ struct iwl_ops {
+@@ -398,6 +401,8 @@ int iwl_tx_queue_reclaim(struct iwl_priv
+ /* Handlers */
+ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
++void iwl_recover_from_statistics(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt);
+ void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-03-22 15:24:28.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-03-22 16:02:35.000000000 -0400
+@@ -551,25 +551,18 @@ static void iwl_rx_calc_noise(struct iwl
+ #define REG_RECALIB_PERIOD (60)
+
+ #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
+-void iwl_rx_statistics(struct iwl_priv *priv,
+- struct iwl_rx_mem_buffer *rxb)
++/*
++ * This function checks for plcp error.
++ * - When the plcp error is exceeding the thresholds, it will reset the radio
++ * to improve the throughput.
++ */
++void iwl_recover_from_statistics(struct iwl_priv *priv,
++ struct iwl_rx_packet *pkt)
+ {
+- int change;
+- struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
+
+- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
+- (int)sizeof(priv->statistics),
+- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+-
+- change = ((priv->statistics.general.temperature !=
+- pkt->u.stats.general.temperature) ||
+- ((priv->statistics.flag &
+- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+-
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+@@ -590,11 +583,11 @@ void iwl_rx_statistics(struct iwl_priv *
+ le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+- ((combined_plcp_delta * 100) / plcp_msec) >
++ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+- * if plcp_err exceed the threshold, the following
+- * data is printed in csv format:
++ * if plcp_err exceed the threshold,
++ * the following data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+@@ -609,9 +602,8 @@ void iwl_rx_statistics(struct iwl_priv *
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
+ le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+ le32_to_cpu(
+- priv->statistics.rx.ofdm_ht.plcp_err),
++ priv->statistics.rx.ofdm_ht.plcp_err),
+ combined_plcp_delta, plcp_msec);
+-
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+@@ -619,6 +611,28 @@ void iwl_rx_statistics(struct iwl_priv *
+ iwl_force_reset(priv, IWL_RF_RESET);
+ }
+ }
++}
++EXPORT_SYMBOL(iwl_recover_from_statistics);
++
++void iwl_rx_statistics(struct iwl_priv *priv,
++ struct iwl_rx_mem_buffer *rxb)
++{
++ int change;
++ struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
++
++
++ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
++ (int)sizeof(priv->statistics),
++ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
++
++ change = ((priv->statistics.general.temperature !=
++ pkt->u.stats.general.temperature) ||
++ ((priv->statistics.flag &
++ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
++ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
++
++ if (priv->cfg->ops->lib->recover_from_statistics)
++ priv->cfg->ops->lib->recover_from_statistics(priv, pkt);
+
+ memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
+
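The point of routing the check through recover_from_statistics in iwl_lib_ops above is that only devices providing the callback participate in the recovery; others leave the pointer NULL. A standalone sketch of that optional-callback pattern (not part of the patch; names are illustrative, not the driver's):

#include <stdio.h>
#include <stddef.h>

struct stats { int plcp_err; };

struct lib_ops {
        /* optional: left NULL by devices that do not need this recovery */
        void (*recover_from_statistics)(struct stats *s);
};

static void check_plcp(struct stats *s)
{
        printf("checking plcp_err=%d\n", s->plcp_err);
}

static void rx_statistics(const struct lib_ops *ops, struct stats *s)
{
        if (ops->recover_from_statistics)
                ops->recover_from_statistics(s);
        /* statistics bookkeeping continues either way */
}

int main(void)
{
        struct lib_ops with_recovery = { .recover_from_statistics = check_plcp };
        struct lib_ops without_recovery = { .recover_from_statistics = NULL };
        struct stats s = { .plcp_err = 42 };

        rx_statistics(&with_recovery, &s);
        rx_statistics(&without_recovery, &s);
        return 0;
}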
diff --git a/freed-ora/current/F-12/iwlwifi_-multiple-force-reset-mode.patch b/freed-ora/current/F-12/iwlwifi_-multiple-force-reset-mode.patch
new file mode 100644
index 000000000..adc3b246e
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-multiple-force-reset-mode.patch
@@ -0,0 +1,152 @@
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-03-22 16:37:23.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-03-22 16:39:46.000000000 -0400
+@@ -3035,7 +3035,7 @@ void iwl_update_stats(struct iwl_priv *p
+ EXPORT_SYMBOL(iwl_update_stats);
+ #endif
+
+-void iwl_force_rf_reset(struct iwl_priv *priv)
++static void iwl_force_rf_reset(struct iwl_priv *priv)
+ {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+@@ -3057,7 +3057,47 @@ void iwl_force_rf_reset(struct iwl_priv
+ iwl_internal_short_hw_scan(priv);
+ return;
+ }
+-EXPORT_SYMBOL(iwl_force_rf_reset);
++
++#define IWL_DELAY_NEXT_FORCE_RESET (HZ*3)
++
++int iwl_force_reset(struct iwl_priv *priv, int mode)
++{
++ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
++ return -EINVAL;
++
++ if (priv->last_force_reset_jiffies &&
++ time_after(priv->last_force_reset_jiffies +
++ IWL_DELAY_NEXT_FORCE_RESET, jiffies)) {
++ IWL_DEBUG_INFO(priv, "force reset rejected\n");
++ return -EAGAIN;
++ }
++
++ IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
++
++ switch (mode) {
++ case IWL_RF_RESET:
++ iwl_force_rf_reset(priv);
++ break;
++ case IWL_FW_RESET:
++ IWL_ERR(priv, "On demand firmware reload\n");
++ /* Set the FW error flag -- cleared on iwl_down */
++ set_bit(STATUS_FW_ERROR, &priv->status);
++ wake_up_interruptible(&priv->wait_command_queue);
++ /*
++ * Keep the restart process from trying to send host
++ * commands by clearing the INIT status bit
++ */
++ clear_bit(STATUS_READY, &priv->status);
++ queue_work(priv->workqueue, &priv->restart);
++ break;
++ default:
++ IWL_DEBUG_INFO(priv, "invalid reset request.\n");
++ return -EINVAL;
++ }
++ priv->last_force_reset_jiffies = jiffies;
++
++ return 0;
++}
+
+ #ifdef CONFIG_PM
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-03-22 16:37:23.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-03-22 16:39:46.000000000 -0400
+@@ -465,7 +465,7 @@ int iwl_scan_cancel(struct iwl_priv *pri
+ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+ int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+ int iwl_internal_short_hw_scan(struct iwl_priv *priv);
+-void iwl_force_rf_reset(struct iwl_priv *priv);
++int iwl_force_reset(struct iwl_priv *priv, int mode);
+ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+ const u8 *ie, int ie_len, int left);
+ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-03-22 16:37:23.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-03-22 16:39:46.000000000 -0400
+@@ -972,6 +972,11 @@ struct traffic_stats {
+ #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
+ #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+
++enum iwl_reset {
++ IWL_RF_RESET = 0,
++ IWL_FW_RESET,
++};
++
+ struct iwl_priv {
+
+ /* ieee device used by generic ieee processing code */
+@@ -1003,6 +1008,9 @@ struct iwl_priv {
+ /* storing the jiffies when the plcp error rate is received */
+ unsigned long plcp_jiffies;
+
++ /* force reset */
++ unsigned long last_force_reset_jiffies;
++
+ /* we allocate array of iwl4965_channel_info for NIC's valid channels.
+ * Access via channel # using indirect index array */
+ struct iwl_channel_info *channel_info; /* channel info array */
+@@ -1025,7 +1033,6 @@ struct iwl_priv {
+ unsigned long scan_start;
+ unsigned long scan_pass_start;
+ unsigned long scan_start_tsf;
+- unsigned long last_internal_scan_jiffies;
+ void *scan;
+ int scan_bands;
+ struct cfg80211_scan_request *scan_request;
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-03-22 16:37:23.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-03-22 16:39:46.000000000 -0400
+@@ -616,7 +616,7 @@ void iwl_rx_statistics(struct iwl_priv *
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+- iwl_force_rf_reset(priv);
++ iwl_force_reset(priv, IWL_RF_RESET);
+ }
+ }
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig 2010-03-22 16:37:23.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c 2010-03-22 16:39:46.000000000 -0400
+@@ -255,8 +255,6 @@ static void iwl_rx_scan_complete_notif(s
+
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
+- else
+- priv->last_internal_scan_jiffies = jiffies;
+
+ IWL_DEBUG_INFO(priv, "Setting scan to off\n");
+
+@@ -564,8 +562,6 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
+ * internal short scan, this function should only been called while associated.
+ * It will reset and tune the radio to prevent possible RF related problem
+ */
+-#define IWL_DELAY_NEXT_INTERNAL_SCAN (HZ*1)
+-
+ int iwl_internal_short_hw_scan(struct iwl_priv *priv)
+ {
+ int ret = 0;
+@@ -585,12 +581,6 @@ int iwl_internal_short_hw_scan(struct iw
+ ret = -EAGAIN;
+ goto out;
+ }
+- if (priv->last_internal_scan_jiffies &&
+- time_after(priv->last_internal_scan_jiffies +
+- IWL_DELAY_NEXT_INTERNAL_SCAN, jiffies)) {
+- IWL_DEBUG_SCAN(priv, "internal scan rejected\n");
+- goto out;
+- }
+
+ priv->scan_bands = 0;
+ if (priv->band == IEEE80211_BAND_5GHZ)
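In the patch above, iwl_force_reset() refuses a request if the previous forced reset happened less than IWL_DELAY_NEXT_FORCE_RESET (3 seconds) ago, regardless of mode. A standalone sketch of that rate limiting (not part of the patch) with a plain millisecond counter standing in for jiffies/time_after(), which are kernel primitives:

#include <stdbool.h>

#define DELAY_NEXT_FORCE_RESET_MS 3000  /* mirrors HZ * 3 in the patch */

static unsigned long last_force_reset_ms;

/* Returns false when the request arrives too soon after the previous one
 * (the driver returns -EAGAIN in that case). */
static bool force_reset_allowed(unsigned long now_ms)
{
        if (last_force_reset_ms &&
            now_ms < last_force_reset_ms + DELAY_NEXT_FORCE_RESET_MS)
                return false;
        last_force_reset_ms = now_ms;
        return true;
}

Note that this single shared timestamp is exactly what the follow-up patch below replaces with per-mode bookkeeping, so an RF reset no longer delays a firmware reload.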
diff --git a/freed-ora/current/F-12/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch b/freed-ora/current/F-12/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch
new file mode 100644
index 000000000..0faa50585
--- /dev/null
+++ b/freed-ora/current/F-12/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch
@@ -0,0 +1,120 @@
+Back-port of the following upstream commit...
+
+commit 8a472da431998b7357e6dc562e79a3061ed56cad
+Author: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Thu Feb 18 22:03:06 2010 -0800
+
+ iwlwifi: separated time check for different type of force reset
+
+ Use a different timing duration check for each type of force reset;
+ force reset requests can come from different sources and for different
+ reasons, and one type of reset request should not block another.
+
+ Add a structure to keep track of the different force reset requests.
+
+ Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+ Signed-off-by: John W. Linville <linville@tuxdriver.com>
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-03-22 15:24:28.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-03-22 15:40:48.000000000 -0400
+@@ -1497,6 +1497,12 @@ int iwl_init_drv(struct iwl_priv *priv)
+
+ priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
+
++ /* initialize force reset */
++ priv->force_reset[IWL_RF_RESET].reset_duration =
++ IWL_DELAY_NEXT_FORCE_RF_RESET;
++ priv->force_reset[IWL_FW_RESET].reset_duration =
++ IWL_DELAY_NEXT_FORCE_FW_RELOAD;
++
+ /* Choose which receivers/antennas to use */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+@@ -3058,22 +3064,30 @@ static void iwl_force_rf_reset(struct iw
+ return;
+ }
+
+-#define IWL_DELAY_NEXT_FORCE_RESET (HZ*3)
+
+ int iwl_force_reset(struct iwl_priv *priv, int mode)
+ {
++ struct iwl_force_reset *force_reset;
++
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EINVAL;
+
+- if (priv->last_force_reset_jiffies &&
+- time_after(priv->last_force_reset_jiffies +
+- IWL_DELAY_NEXT_FORCE_RESET, jiffies)) {
++ if (mode >= IWL_MAX_FORCE_RESET) {
++ IWL_DEBUG_INFO(priv, "invalid reset request.\n");
++ return -EINVAL;
++ }
++ force_reset = &priv->force_reset[mode];
++ force_reset->reset_request_count++;
++ if (force_reset->last_force_reset_jiffies &&
++ time_after(force_reset->last_force_reset_jiffies +
++ force_reset->reset_duration, jiffies)) {
+ IWL_DEBUG_INFO(priv, "force reset rejected\n");
++ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+-
++ force_reset->reset_success_count++;
++ force_reset->last_force_reset_jiffies = jiffies;
+ IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
+-
+ switch (mode) {
+ case IWL_RF_RESET:
+ iwl_force_rf_reset(priv);
+@@ -3090,12 +3104,7 @@ int iwl_force_reset(struct iwl_priv *pri
+ clear_bit(STATUS_READY, &priv->status);
+ queue_work(priv->workqueue, &priv->restart);
+ break;
+- default:
+- IWL_DEBUG_INFO(priv, "invalid reset request.\n");
+- return -EINVAL;
+ }
+- priv->last_force_reset_jiffies = jiffies;
+-
+ return 0;
+ }
+
+diff -up linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h
+--- linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-03-22 15:33:00.000000000 -0400
++++ linux-2.6.32.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-03-22 15:37:04.000000000 -0400
+@@ -973,9 +973,21 @@ struct traffic_stats {
+ #define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
+ #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+
++#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
++#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
++
+ enum iwl_reset {
+ IWL_RF_RESET = 0,
+ IWL_FW_RESET,
++ IWL_MAX_FORCE_RESET,
++};
++
++struct iwl_force_reset {
++ int reset_request_count;
++ int reset_success_count;
++ int reset_reject_count;
++ unsigned long reset_duration;
++ unsigned long last_force_reset_jiffies;
+ };
+
+ struct iwl_priv {
+@@ -1010,7 +1022,7 @@ struct iwl_priv {
+ unsigned long plcp_jiffies;
+
+ /* force reset */
+- unsigned long last_force_reset_jiffies;
++ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+
+ /* we allocate array of iwl4965_channel_info for NIC's valid channels.
+ * Access via channel # using indirect index array */
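With the separated time check above, each reset type keeps its own minimum interval and counters, so the two request paths are rate-limited independently. A standalone sketch of the bookkeeping (not part of the patch; the intervals mirror the patch, RF 3 s and FW 5 s, everything else is illustrative):

#include <stdbool.h>

enum reset_mode { RF_RESET = 0, FW_RESET, MAX_FORCE_RESET };

struct force_reset_state {
        int           request_count;
        int           success_count;
        int           reject_count;
        unsigned long min_interval_ms;
        unsigned long last_ms;
};

static struct force_reset_state force_reset[MAX_FORCE_RESET] = {
        [RF_RESET] = { .min_interval_ms = 3000 },
        [FW_RESET] = { .min_interval_ms = 5000 },
};

static bool request_reset(enum reset_mode mode, unsigned long now_ms)
{
        struct force_reset_state *fr = &force_reset[mode];

        fr->request_count++;
        if (fr->last_ms && now_ms < fr->last_ms + fr->min_interval_ms) {
                fr->reject_count++;
                return false;           /* rejected, -EAGAIN in the driver */
        }
        fr->success_count++;
        fr->last_ms = now_ms;
        return true;
}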
diff --git a/freed-ora/current/F-12/kernel.spec b/freed-ora/current/F-12/kernel.spec
index aae93cb46..bcab29ca7 100644
--- a/freed-ora/current/F-12/kernel.spec
+++ b/freed-ora/current/F-12/kernel.spec
@@ -37,19 +37,18 @@ Summary: The Linux kernel
%endif
%endif
-# fedora_build defines which build revision of this kernel version we're
-# building. Rather than incrementing forever, as with the prior versioning
-# setup, we set fedora_cvs_origin to the current cvs revision s/1.// of the
-# kernel spec when the kernel is rebased, so fedora_build automatically
-# works out to the offset from the rebase, so it doesn't get too ginormous.
+# baserelease defines which build revision of this kernel version we're
+# building. We used to call this fedora_build, but the magical name
+# baserelease is matched by the rpmdev-bumpspec tool, which you should use.
#
-# If you're building on a branch, the RCS revision will be something like
-# 1.1205.1.1. In this case we drop the initial 1, subtract fedora_cvs_origin
-# from the second number, and then append the rest of the RCS string as is.
-# Don't stare at the awk too long, you'll go blind.
-%define fedora_cvs_origin 1962
-%define fedora_cvs_revision() %2
-%global fedora_build %(echo %{fedora_cvs_origin}.%{fedora_cvs_revision $Revision: 1.2116 $} | awk -F . '{ OFS = "."; ORS = ""; print $3 - $1 ; i = 4 ; OFS = ""; while (i <= NF) { print ".", $i ; i++} }')
+# We used to have some extra magic weirdness to bump this automatically,
+# but now we don't. Just use: rpmdev-bumpspec -c 'comment for changelog'
+# When changing base_sublevel below or going from rc to a final kernel,
+# reset this by hand to 1 (or to 0 and then use rpmdev-bumpspec).
+# scripts/rebase.sh should be made to do that for you, actually.
+#
+%global baserelease 159
+%global fedora_build %{baserelease}
# base_sublevel is the kernel version we're starting with and patching
# on top of -- for example, 2.6.22-rc7-git1 starts with a 2.6.21 base,
@@ -74,7 +73,7 @@ Summary: The Linux kernel
%if 0%{?released_kernel}
# Do we have a -stable update to apply?
-%define stable_update 16
+%define stable_update 18
# Is it a -stable RC?
%define stable_rc 0
# Set rpm version accordingly
@@ -742,6 +741,7 @@ Patch1700: linux-2.6-x86-64-fbdev-primary.patch
# nouveau + drm fixes
Patch1810: drm-upgrayedd.patch
+Patch1811: drm-upgrayed-fixes.patch
Patch1813: drm-radeon-pm.patch
#Patch1814: drm-nouveau.patch
Patch1818: drm-i915-resume-force-mode.patch
@@ -762,6 +762,7 @@ Patch1844: drm-nouveau-kconfig.patch
Patch1845: drm-nouveau-mutex.patch
Patch1846: drm-nouveau-update.patch
Patch1847: drm-nouveau-d620.patch
+Patch1848: drm-nouveau-nva3-noaccel.patch
# kludge to make ich9 e1000 work
Patch2000: linux-2.6-e1000-ich9.patch
@@ -787,7 +788,6 @@ Patch2096: linux-2.6-v4l-dvb-add-kworld-a340-support.patch
# fs fixes
# ext4/quota
-Patch3000: linux-2.6-ext4-quota-metadata-reservation.patch
# NFSv4
Patch3050: linux-2.6-nfsd4-proots.patch
@@ -845,9 +845,6 @@ Patch12414: iwlwifi_-Recover-TX-flow-failure.patch
Patch12415: iwlwifi_-code-cleanup-for-connectivity-recovery.patch
Patch12416: iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch
-# fix possible corruption with ssd
-Patch12700: ext4-issue-discard-operation-before-releasing-blocks.patch
-
# iwlwifi: fix scan races
Patch12910: iwlwifi-fix-scan-races.patch
# iwlwifi: fix internal scan race
@@ -856,34 +853,26 @@ Patch12911: iwlwifi-fix-internal-scan-race.patch
Patch12912: iwlwifi-recover_from_tx_stall.patch
Patch12921: iwlwifi-manage-QoS-by-mac-stack.patch
-Patch12922: mac80211-do-not-wipe-out-old-supported-rates.patch
Patch12923: mac80211-explicitly-disable-enable-QoS.patch
-Patch12924: mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch
-
-# iwlwifi: cancel scan watchdog in iwl_bg_abort_scan
-Patch13020: iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch
# l2tp: fix oops in pppol2tp_xmit (#607054)
Patch13030: l2tp-fix-oops-in-pppol2tp_xmit.patch
-Patch14000: sched-fix-over-scheduling-bug.patch
-Patch14010: ethtool-fix-buffer-overflow.patch
-
Patch14020: inotify-fix-inotify-oneshot-support.patch
Patch14030: inotify-send-IN_UNMOUNT-events.patch
Patch14040: crypto-testmgr-add-null-test-for-aesni.patch
Patch14050: crypto-add-async-hash-testing.patch
-Patch14100: cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch
Patch14110: ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch
-Patch14115: xfs-prevent-swapext-from-operating-on-write-only-files.patch
-
-Patch14120: usb-obey-the-sysfs-power-wakeup-setting.patch
+Patch14120: ext4-fix-freeze-deadlock-under-io.patch
# Red Hat Bugzilla #610911
Patch14130: kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch
+Patch14140: hid-01-usbhid-initialize-interface-pointers-early-enough.patch
+Patch14141: hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch
+
# ==============================================================================
%endif
@@ -1186,16 +1175,23 @@ ApplyOptionalPatch()
%endif
%endif
-# We can share hardlinked source trees by putting a list of
-# directory names of the CVS checkouts that we want to share
-# with in .shared-srctree. (Full pathnames are required.)
-[ -f .shared-srctree ] && sharedirs=$(cat .shared-srctree)
+# %{vanillaversion} : the full version name, e.g. 2.6.35-rc6-git3
+# %{kversion} : the base version, e.g. 2.6.34
+
+# Use kernel-%{kversion}%{?dist} as the top-level directory name
+# so we can prep different trees within a single git directory.
-if [ ! -d kernel-%{kversion}/vanilla-%{vanillaversion} ]; then
+# Build a list of the other top-level kernel tree directories.
+# This will be used to hardlink identical vanilla subdirs.
+sharedirs=$(find "$PWD" -maxdepth 1 -type d -name 'kernel-2.6.*' \
+ | grep -x -v "$PWD"/kernel-%{kversion}%{?dist}) ||:
- if [ -d kernel-%{kversion}/vanilla-%{kversion} ]; then
+if [ ! -d kernel-%{kversion}%{?dist}/vanilla-%{vanillaversion} ]; then
- cd kernel-%{kversion}
+ if [ -d kernel-%{kversion}%{?dist}/vanilla-%{kversion} ]; then
+
+ # The base vanilla version already exists.
+ cd kernel-%{kversion}%{?dist}
# Any vanilla-* directories other than the base one are stale.
for dir in vanilla-*; do
@@ -1204,18 +1200,18 @@ if [ ! -d kernel-%{kversion}/vanilla-%{vanillaversion} ]; then
else
- # Ok, first time we do a make prep.
rm -f pax_global_header
+ # Look for an identical base vanilla dir that can be hardlinked.
for sharedir in $sharedirs ; do
- if [[ ! -z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{kversion} ]] ; then
+ if [[ ! -z $sharedir && -d $sharedir/vanilla-%{kversion} ]] ; then
break
fi
done
- if [[ ! -z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{kversion} ]] ; then
-%setup -q -n kernel-%{kversion} -c -T
- cp -rl $sharedir/kernel-%{kversion}/vanilla-%{kversion} .
+ if [[ ! -z $sharedir && -d $sharedir/vanilla-%{kversion} ]] ; then
+%setup -q -n kernel-%{kversion}%{?dist} -c -T
+ cp -rl $sharedir/vanilla-%{kversion} .
else
-%setup -q -n kernel-%{kversion} -c
+%setup -q -n kernel-%{kversion}%{?dist} -c
mv linux-%{kversion} vanilla-%{kversion}
fi
@@ -1226,16 +1222,17 @@ perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION =/" vanilla-%{kversion}/Makefile
%if "%{kversion}" != "%{vanillaversion}"
for sharedir in $sharedirs ; do
- if [[ ! -z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{vanillaversion} ]] ; then
+ if [[ ! -z $sharedir && -d $sharedir/vanilla-%{vanillaversion} ]] ; then
break
fi
done
- if [[ ! -z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{vanillaversion} ]] ; then
+ if [[ ! -z $sharedir && -d $sharedir/vanilla-%{vanillaversion} ]] ; then
- cp -rl $sharedir/kernel-%{kversion}/vanilla-%{vanillaversion} .
+ cp -rl $sharedir/vanilla-%{vanillaversion} .
else
+ # Need to apply patches to the base vanilla version.
cp -rl vanilla-%{kversion} vanilla-%{vanillaversion}
cd vanilla-%{vanillaversion}
@@ -1260,10 +1257,13 @@ perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION =/" vanilla-%{kversion}/Makefile
%endif
else
- # We already have a vanilla dir.
- cd kernel-%{kversion}
+
+ # We already have all vanilla dirs, just change to the top-level directory.
+ cd kernel-%{kversion}%{?dist}
+
fi
+# Now build the fedora kernel tree.
if [ -d linux-%{kversion}.%{_target_cpu} ]; then
# Just in case we ctrl-c'd a prep already
rm -rf deleteme.%{_target_cpu}
@@ -1377,7 +1377,6 @@ ApplyPatch linux-2.6-execshield.patch
#
# ext4
-ApplyPatch linux-2.6-ext4-quota-metadata-reservation.patch
# xfs
@@ -1517,6 +1516,7 @@ ApplyPatch linux-2.6-phylib-autoload.patch
ApplyPatch linux-2.6-x86-64-fbdev-primary.patch
# Nouveau DRM + drm fixes
ApplyPatch drm-upgrayedd.patch
+ApplyPatch drm-upgrayed-fixes.patch
#ApplyPatch drm-intel-big-hammer.patch
#ApplyPatch drm-intel-no-tv-hotplug.patch
ApplyOptionalPatch drm-intel-next.patch
@@ -1534,6 +1534,7 @@ ApplyPatch drm-nouveau-safetile-getparam.patch
ApplyPatch drm-nouveau-kconfig.patch
ApplyPatch drm-nouveau-update.patch
ApplyPatch drm-nouveau-d620.patch
+ApplyPatch drm-nouveau-nva3-noaccel.patch
# linux1394 git patches
#ApplyPatch linux-2.6-firewire-git-update.patch
@@ -1569,7 +1570,7 @@ ApplyPatch ice1712-fix-revo71-mixer-names.patch
ApplyPatch linux-2.6-b43_-Rewrite-DMA-Tx-status-handling-sanity-checks.patch
# rhbz#533746
-ApplyPatch ssb_check_for_sprom.patch
+#ApplyPatch ssb_check_for_sprom.patch
# backport iwlwifi fixes (thanks, sgruszka!) -- drop when stable catches-up
ApplyPatch iwlwifi-reset-card-during-probe.patch
@@ -1588,9 +1589,6 @@ ApplyPatch iwlwifi_-Recover-TX-flow-failure.patch
ApplyPatch iwlwifi_-code-cleanup-for-connectivity-recovery.patch
ApplyPatch iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch
-# fix possible corruption with ssd
-ApplyPatch ext4-issue-discard-operation-before-releasing-blocks.patch
-
# iwlwifi: fix scan races
ApplyPatch iwlwifi-fix-scan-races.patch
# iwlwifi: fix internal scan race
@@ -1601,21 +1599,10 @@ ApplyPatch iwlwifi-recover_from_tx_stall.patch
# mac80211/iwlwifi fix connections to some APs (rhbz#558002)
ApplyPatch mac80211-explicitly-disable-enable-QoS.patch
ApplyPatch iwlwifi-manage-QoS-by-mac-stack.patch
-ApplyPatch mac80211-do-not-wipe-out-old-supported-rates.patch
-ApplyPatch mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch
-
-# iwlwifi: cancel scan watchdog in iwl_bg_abort_scan
-ApplyPatch iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch
# l2tp: fix oops in pppol2tp_xmit (#607054)
ApplyPatch l2tp-fix-oops-in-pppol2tp_xmit.patch
-# fix performance problem with CGROUPS
-ApplyPatch sched-fix-over-scheduling-bug.patch
-
-# CVE-2010-2478
-ApplyPatch ethtool-fix-buffer-overflow.patch
-
# fix broken oneshot support and missing umount events (F13#607327)
ApplyPatch inotify-fix-inotify-oneshot-support.patch
ApplyPatch inotify-send-IN_UNMOUNT-events.patch
@@ -1625,18 +1612,17 @@ ApplyPatch crypto-testmgr-add-null-test-for-aesni.patch
# add tests for crypto async hashing (#571577)
ApplyPatch crypto-add-async-hash-testing.patch
-# CVE-2010-2524
-ApplyPatch cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch
# CVE-2010-2066
ApplyPatch ext4-make-sure-the-move_ext-ioctl-can-t-overwrite-append-only-files.patch
-# CVE-2010-2266
-ApplyPatch xfs-prevent-swapext-from-operating-on-write-only-files.patch
-
-# fix broken USB device wakeups (#617559)
-ApplyPatch usb-obey-the-sysfs-power-wakeup-setting.patch
+# Fix deadlock caused by patch in 2.6.32.17
+ApplyPatch ext4-fix-freeze-deadlock-under-io.patch
ApplyPatch kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch
+# RHBZ #592785
+ApplyPatch hid-01-usbhid-initialize-interface-pointers-early-enough.patch
+ApplyPatch hid-02-fix-suspend-crash-by-moving-initializations-earlier.patch
+
# END OF PATCH APPLICATIONS ====================================================
%endif
@@ -2287,6 +2273,53 @@ fi
%kernel_variant_files -k vmlinux %{with_kdump} kdump
%changelog
+* Tue Aug 10 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.32.18-159
+- Linux 2.6.32.18
+- Backport nouveau noaccel fix for nva3+ cards from f13.
+- ext4-fix-freeze-deadlock-under-io.patch:
+ Fix deadlock caused by patch in 2.6.32.17
+ (0036-ext4-don-t-return-to-userspace-after-freezing-the-fs.patch)
+
+* Tue Aug 10 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.32.18-158.rc1
+- Bring back drm-upgrayed-fixes.patch, dropped in the
+ 2.6.32.16 update. (#620955)
+- Revert upstream DRM stable fix we already have:
+ drm-i915-give-up-on-8xx-lid-status.patch
+
+* Sat Aug 07 2010 Chuck Ebbert <cebbert@redhat.com>
+- Linux 2.6.32.18-rc1
+- Revert DRM patches from -stable we already have:
+ drm-i915-Fix-LVDS-presence-check
+ drm-i915-parse-child-device-from-vbt.patch
+- Comment out patches merged in -stable:
+ xfs-prevent-swapext-from-operating-on-write-only-files.patch
+ cifs-fix-dns-resolver.patch
+
+* Fri Aug 06 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.32.17-157
+- Fix USB HID initialization (#592785)
+
+* Mon Aug 02 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.32.17-156
+- Linux 2.6.32.17
+- Drop the patches commented out for -rc1 except ssb_check_for_sprom.patch
+
+* Mon Aug 02 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.32.17-155.rc1
+- Linux 2.6.32.17-rc1
+- Comment out patches merged upstream:
+ linux-2.6-ext4-quota-metadata-reservation.patch
+ ext4-issue-discard-operation-before-releasing-blocks.patch
+ mac80211-do-not-wipe-out-old-supported-rates.patch
+ mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch
+ iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch
+ sched-fix-over-scheduling-bug.patch
+ ethtool-fix-buffer-overflow.patch
+ cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch
+ usb-obey-the-sysfs-power-wakeup-setting.patch
+- Revert -stable patches we already have:
+ drm-i915-enable-low-power-render-writes-on-gen3-hardware.patch
+ drm-i915-define-mi_arb_state-bits.patch
+- Comment out due to conflicts with -stable:
+ ssb_check_for_sprom.patch
+
* Tue Jul 27 2010 Chuck Ebbert <cebbert@redhat.com> 2.6.32.16-154
- xfs-prevent-swapext-from-operating-on-write-only-files.patch:
CVE-2010-2266
diff --git a/freed-ora/current/F-12/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch b/freed-ora/current/F-12/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch
new file mode 100644
index 000000000..eefdda5ce
--- /dev/null
+++ b/freed-ora/current/F-12/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch
@@ -0,0 +1,49 @@
+From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Date: Wed, 30 Jun 2010 08:02:45 +0000 (+0800)
+Subject: KVM: MMU: fix conflict access permissions in direct sp
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=6aa0b9dec5d6dde26ea17b0b5be8fccfe19df3c9
+
+KVM: MMU: fix conflict access permissions in direct sp
+
+In non-direct mapping, we mark the sp as 'direct' when we map the guest's
+larger page, but its access is encoded from the upper page-structure
+entries only and does not include the last mapping, which causes an access
+conflict.
+
+For example, have this mapping:
+ [W]
+ / PDE1 -> |---|
+ P[W] | | LPA
+ \ PDE2 -> |---|
+ [R]
+
+P has two children, PDE1 and PDE2, and both PDE1 and PDE2 map the
+same large page (LPA). P's access is WR, PDE1's access is WR, and
+PDE2's access is RO (considering only read-write permissions here).
+
+When the guest accesses PDE1, we create a direct sp for LPA; the sp's
+access comes from P and is W, so we mark the ptes in this sp as W.
+
+Then, when the guest accesses PDE2, we find LPA's existing shadow page
+(the same one as for PDE1) and mark the ptes as RO.
+
+So, if the guest accesses PDE1 again, an incorrect #PF occurs.
+
+Fixed by encoding the last mapping's access into the direct shadow page.
+
+Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+---
+
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 89d66ca..2331bdc 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -342,6 +342,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+ /* advance table_gfn when emulating 1gb pages with 4k */
+ if (delta == 0)
+ table_gfn += PT_INDEX(addr, level);
++ access &= gw->pte_access;
+ } else {
+ direct = 0;
+ table_gfn = gw->table_gfn[level - 2];
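The one-line fix above (access &= gw->pte_access) makes the direct shadow page use the intersection of the upper-level access and the final guest PTE's access, so the read-only walk through PDE2 can no longer be confused with the writable walk through PDE1. A tiny standalone illustration of that intersection (not part of the patch; permissions reduced to a two-bit mask):

#include <stdio.h>

#define ACC_READ  0x1u
#define ACC_WRITE 0x2u

int main(void)
{
        unsigned int parent = ACC_READ | ACC_WRITE; /* P is writable        */
        unsigned int pde1   = ACC_READ | ACC_WRITE; /* first walk: writable */
        unsigned int pde2   = ACC_READ;             /* second walk: RO      */

        printf("shadow access via PDE1: %#x\n", parent & pde1); /* 0x3 */
        printf("shadow access via PDE2: %#x\n", parent & pde2); /* 0x1 */
        return 0;
}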
diff --git a/freed-ora/current/F-12/l2tp-fix-oops-in-pppol2tp_xmit.patch b/freed-ora/current/F-12/l2tp-fix-oops-in-pppol2tp_xmit.patch
new file mode 100644
index 000000000..bf076487c
--- /dev/null
+++ b/freed-ora/current/F-12/l2tp-fix-oops-in-pppol2tp_xmit.patch
@@ -0,0 +1,78 @@
+From: James Chapman <jchapman@katalix.com>
+Date: Tue, 16 Mar 2010 06:46:31 +0000 (+0000)
+Subject: l2tp: Fix oops in pppol2tp_xmit
+X-Git-Tag: v2.6.34-rc2~28^2~10
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3feec909
+
+l2tp: Fix oops in pppol2tp_xmit
+
+When transmitting L2TP frames, we derive the outgoing interface's UDP
+checksum hardware assist capabilities from the tunnel dst dev. This
+can sometimes be NULL, especially when routing protocols are used and
+routing changes occur. This patch just checks for NULL dst or dev
+pointers when checking for netdev hardware assist features.
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000c
+IP: [<f89d074c>] pppol2tp_xmit+0x341/0x4da [pppol2tp]
+*pde = 00000000
+Oops: 0000 [#1] SMP
+last sysfs file: /sys/class/net/lo/operstate
+Modules linked in: pppol2tp pppox ppp_generic slhc ipv6 dummy loop snd_hda_codec_atihdmi snd_hda_intel snd_hda_codec snd_pcm snd_timer snd soundcore snd_page_alloc evdev psmouse serio_raw processor button i2c_piix4 i2c_core ati_agp agpgart pcspkr ext3 jbd mbcache sd_mod ide_pci_generic atiixp ide_core ahci ata_generic floppy ehci_hcd ohci_hcd libata e1000e scsi_mod usbcore nls_base thermal fan thermal_sys [last unloaded: scsi_wait_scan]
+
+Pid: 0, comm: swapper Not tainted (2.6.32.8 #1)
+EIP: 0060:[<f89d074c>] EFLAGS: 00010297 CPU: 3
+EIP is at pppol2tp_xmit+0x341/0x4da [pppol2tp]
+EAX: 00000000 EBX: f64d1680 ECX: 000005b9 EDX: 00000000
+ESI: f6b91850 EDI: f64d16ac EBP: f6a0c4c0 ESP: f70a9cac
+ DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
+Process swapper (pid: 0, ti=f70a8000 task=f70a31c0 task.ti=f70a8000)
+Stack:
+ 000005a9 000005b9 f734c400 f66652c0 f7352e00 f67dc800 00000000 f6b91800
+<0> 000005a3 f70ef6c4 f67dcda9 000005a3 f89b192e 00000246 000005a3 f64d1680
+<0> f63633e0 f6363320 f64d1680 f65a7320 f65a7364 f65856c0 f64d1680 f679f02f
+Call Trace:
+ [<f89b192e>] ? ppp_push+0x459/0x50e [ppp_generic]
+ [<f89b217f>] ? ppp_xmit_process+0x3b6/0x430 [ppp_generic]
+ [<f89b2306>] ? ppp_start_xmit+0x10d/0x120 [ppp_generic]
+ [<c11c15cb>] ? dev_hard_start_xmit+0x21f/0x2b2
+ [<c11d0947>] ? sch_direct_xmit+0x48/0x10e
+ [<c11c19a0>] ? dev_queue_xmit+0x263/0x3a6
+ [<c11e2a9f>] ? ip_finish_output+0x1f7/0x221
+ [<c11df682>] ? ip_forward_finish+0x2e/0x30
+ [<c11de645>] ? ip_rcv_finish+0x295/0x2a9
+ [<c11c0b19>] ? netif_receive_skb+0x3e9/0x404
+ [<f814b791>] ? e1000_clean_rx_irq+0x253/0x2fc [e1000e]
+ [<f814cb7a>] ? e1000_clean+0x63/0x1fc [e1000e]
+ [<c1047eff>] ? sched_clock_local+0x15/0x11b
+ [<c11c1095>] ? net_rx_action+0x96/0x195
+ [<c1035750>] ? __do_softirq+0xaa/0x151
+ [<c1035828>] ? do_softirq+0x31/0x3c
+ [<c10358fe>] ? irq_exit+0x26/0x58
+ [<c1004b21>] ? do_IRQ+0x78/0x89
+ [<c1003729>] ? common_interrupt+0x29/0x30
+ [<c101ac28>] ? native_safe_halt+0x2/0x3
+ [<c1008c54>] ? default_idle+0x55/0x75
+ [<c1009045>] ? c1e_idle+0xd2/0xd5
+ [<c100233c>] ? cpu_idle+0x46/0x62
+Code: 8d 45 08 f0 ff 45 08 89 6b 08 c7 43 68 7e fb 9c f8 8a 45 24 83 e0 0c 3c 04 75 09 80 63 64 f3 e9 b4 00 00 00 8b 43 18 8b 4c 24 04 <8b> 40 0c 8d 79 11 f6 40 44 0e 8a 43 64 75 51 6a 00 8b 4c 24 08
+EIP: [<f89d074c>] pppol2tp_xmit+0x341/0x4da [pppol2tp] SS:ESP 0068:f70a9cac
+CR2: 000000000000000c
+
+Signed-off-by: James Chapman <jchapman@katalix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
+index 9fbb2eb..5861ee9 100644
+--- a/drivers/net/pppol2tp.c
++++ b/drivers/net/pppol2tp.c
+@@ -1180,7 +1180,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ /* Calculate UDP checksum if configured to do so */
+ if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
+ skb->ip_summed = CHECKSUM_NONE;
+- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
++ else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
++ (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = skb_checksum(skb, 0, udp_len, 0);
+ uh->check = csum_tcpudp_magic(inet->inet_saddr,
diff --git a/freed-ora/current/F-12/linux-2.6-acpi-video-dos.patch b/freed-ora/current/F-12/linux-2.6-acpi-video-dos.patch
new file mode 100644
index 000000000..3e2085193
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-acpi-video-dos.patch
@@ -0,0 +1,17 @@
+Disable firmware video brightness change on AC/Battery switch by default
+
+-- mjg59
+
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index bac2901..93b1a9e 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -1818,7 +1818,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+
+ static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
+ {
+- return acpi_video_bus_DOS(video, 0, 0);
++ return acpi_video_bus_DOS(video, 0, 1);
+ }
+
+ static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
diff --git a/freed-ora/current/F-12/linux-2.6-ata-quirk.patch b/freed-ora/current/F-12/linux-2.6-ata-quirk.patch
new file mode 100644
index 000000000..32096d463
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-ata-quirk.patch
@@ -0,0 +1,58 @@
+--- linux-2.6.20/arch/ia64/kernel/quirks.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.20_fix/arch/ia64/kernel/quirks.c 2007-02-13 13:56:34.000000000 -0500
+@@ -0,0 +1,45 @@
++/*
++ * This file contains work-arounds for ia64 platform bugs.
++ */
++#include <linux/pci.h>
++
++/*
++ * quirk_intel_ide_controller: If an ide/ata controller is
++ * in legacy mode, the BIOS might initialize BARs (BAR 0-3 and 5)
++ * with incorrect values. This quirk resets the incorrect
++ * values to 0.
++ */
++static void __devinit quirk_intel_ide_controller(struct pci_dev *dev)
++{
++ unsigned int pos;
++ struct resource *res;
++ int fixed = 0;
++ u8 tmp8;
++
++ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
++ return;
++
++ /* TODO: What if one channel is in native mode ... */
++ pci_read_config_byte(dev, PCI_CLASS_PROG, &tmp8);
++ if ((tmp8 & 5) == 5)
++ return;
++
++ for( pos = 0; pos < 6; pos ++ ) {
++ res = &dev->resource[pos];
++ if (!(res->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
++ continue;
++
++ if (!res->start && res->end) {
++ res->start = res->end = 0;
++ res->flags = 0;
++ fixed = 1;
++ }
++ }
++ if (fixed)
++ printk(KERN_WARNING
++ "PCI device %s: BIOS resource configuration fixed.\n",
++ pci_name(dev));
++}
++
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_11, quirk_intel_ide_controller);
++
+--- linux-2.6.21.noarch/arch/ia64/kernel/Makefile~ 2007-05-27 23:23:36.000000000 -0400
++++ linux-2.6.21.noarch/arch/ia64/kernel/Makefile 2007-05-27 23:23:48.000000000 -0400
+@@ -33,6 +33,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
+ obj-$(CONFIG_AUDIT) += audit.o
+ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
++obj-$(CONFIG_PCI) += quirks.o
+ mca_recovery-y += mca_drv.o mca_drv_asm.o
+ obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
+
diff --git a/freed-ora/current/F-12/linux-2.6-autoload-wmi.patch b/freed-ora/current/F-12/linux-2.6-autoload-wmi.patch
new file mode 100644
index 000000000..f093a4023
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-autoload-wmi.patch
@@ -0,0 +1,244 @@
+From: Matthew Garrett <mjg@redhat.com>
+Date: Wed, 4 Nov 2009 19:17:53 +0000 (-0500)
+Subject: wmi: Add support for module autoloading
+X-Git-Tag: v2.6.33-rc1~47^2~5^2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=1caab3c1a90be3aa4ec3599409d8fe044b077478
+
+wmi: Add support for module autoloading
+
+WMI provides interface-specific GUIDs that are exported from modules as
+modaliases, but the core currently generates no events to trigger module
+loading. This patch adds support for registering devices for each WMI GUID
+and generating the appropriate uevent.
+
+Based heavily on a patch by Carlos Corbacho (<carlos@strangeworlds.co.uk>).
+
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Tested-by: Carlos Corbacho <carlos@strangeworlds.co.uk>
+Acked-by: Carlos Corbacho <carlos@strangeworlds.co.uk>
+Signed-off-by: Len Brown <len.brown@intel.com>
+---
+
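For context, the autoloading described above rides on the standard modalias mechanism: once the core emits a MODALIAS=wmi:<GUID> uevent for a GUID, udev can load any module that advertises a matching alias. A minimal sketch of the consumer side, with a purely hypothetical driver and an all-zero placeholder GUID:

    /* Hypothetical WMI consumer module; the GUID below is a placeholder. */
    #include <linux/module.h>

    static int __init example_wmi_init(void) { return 0; }
    static void __exit example_wmi_exit(void) { }

    module_init(example_wmi_init);
    module_exit(example_wmi_exit);

    MODULE_LICENSE("GPL");
    /* udev matches this alias against the MODALIAS=wmi:<GUID> uevent
     * generated for each GUID parsed from _WDG. */
    MODULE_ALIAS("wmi:00000000-0000-0000-0000-000000000000");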
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 177f8d7..e425a86 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -30,6 +30,7 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
++#include <linux/device.h>
+ #include <linux/list.h>
+ #include <linux/acpi.h>
+ #include <acpi/acpi_bus.h>
+@@ -65,6 +66,7 @@ struct wmi_block {
+ acpi_handle handle;
+ wmi_notify_handler handler;
+ void *handler_data;
++ struct device *dev;
+ };
+
+ static struct wmi_block wmi_blocks;
+@@ -195,6 +197,34 @@ static bool wmi_parse_guid(const u8 *src, u8 *dest)
+ return true;
+ }
+
++/*
++ * Convert a raw GUID to the ASCII string representation
++ */
++static int wmi_gtoa(const char *in, char *out)
++{
++ int i;
++
++ for (i = 3; i >= 0; i--)
++ out += sprintf(out, "%02X", in[i] & 0xFF);
++
++ out += sprintf(out, "-");
++ out += sprintf(out, "%02X", in[5] & 0xFF);
++ out += sprintf(out, "%02X", in[4] & 0xFF);
++ out += sprintf(out, "-");
++ out += sprintf(out, "%02X", in[7] & 0xFF);
++ out += sprintf(out, "%02X", in[6] & 0xFF);
++ out += sprintf(out, "-");
++ out += sprintf(out, "%02X", in[8] & 0xFF);
++ out += sprintf(out, "%02X", in[9] & 0xFF);
++ out += sprintf(out, "-");
++
++ for (i = 10; i <= 15; i++)
++ out += sprintf(out, "%02X", in[i] & 0xFF);
++
++ *out = '\0';
++ return 0;
++}
++
+ static bool find_guid(const char *guid_string, struct wmi_block **out)
+ {
+ char tmp[16], guid_input[16];
+@@ -555,6 +585,138 @@ bool wmi_has_guid(const char *guid_string)
+ EXPORT_SYMBOL_GPL(wmi_has_guid);
+
+ /*
++ * sysfs interface
++ */
++static ssize_t show_modalias(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ char guid_string[37];
++ struct wmi_block *wblock;
++
++ wblock = dev_get_drvdata(dev);
++ if (!wblock)
++ return -ENOMEM;
++
++ wmi_gtoa(wblock->gblock.guid, guid_string);
++
++ return sprintf(buf, "wmi:%s\n", guid_string);
++}
++static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
++
++static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++ char guid_string[37];
++
++ struct wmi_block *wblock;
++
++ if (add_uevent_var(env, "MODALIAS="))
++ return -ENOMEM;
++
++ wblock = dev_get_drvdata(dev);
++ if (!wblock)
++ return -ENOMEM;
++
++ wmi_gtoa(wblock->gblock.guid, guid_string);
++
++ strcpy(&env->buf[env->buflen - 1], "wmi:");
++ memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36);
++ env->buflen += 40;
++
++ return 0;
++}
++
++static void wmi_dev_free(struct device *dev)
++{
++ kfree(dev);
++}
++
++static struct class wmi_class = {
++ .name = "wmi",
++ .dev_release = wmi_dev_free,
++ .dev_uevent = wmi_dev_uevent,
++};
++
++static int wmi_create_devs(void)
++{
++ int result;
++ char guid_string[37];
++ struct guid_block *gblock;
++ struct wmi_block *wblock;
++ struct list_head *p;
++ struct device *guid_dev;
++
++ /* Create devices for all the GUIDs */
++ list_for_each(p, &wmi_blocks.list) {
++ wblock = list_entry(p, struct wmi_block, list);
++
++ guid_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
++ if (!guid_dev)
++ return -ENOMEM;
++
++ wblock->dev = guid_dev;
++
++ guid_dev->class = &wmi_class;
++ dev_set_drvdata(guid_dev, wblock);
++
++ gblock = &wblock->gblock;
++
++ wmi_gtoa(gblock->guid, guid_string);
++ dev_set_name(guid_dev, guid_string);
++
++ result = device_register(guid_dev);
++ if (result)
++ return result;
++
++ result = device_create_file(guid_dev, &dev_attr_modalias);
++ if (result)
++ return result;
++ }
++
++ return 0;
++}
++
++static void wmi_remove_devs(void)
++{
++ struct guid_block *gblock;
++ struct wmi_block *wblock;
++ struct list_head *p;
++ struct device *guid_dev;
++
++ /* Delete devices for all the GUIDs */
++ list_for_each(p, &wmi_blocks.list) {
++ wblock = list_entry(p, struct wmi_block, list);
++
++ guid_dev = wblock->dev;
++ gblock = &wblock->gblock;
++
++ device_remove_file(guid_dev, &dev_attr_modalias);
++
++ device_unregister(guid_dev);
++ }
++}
++
++static void wmi_class_exit(void)
++{
++ wmi_remove_devs();
++ class_unregister(&wmi_class);
++}
++
++static int wmi_class_init(void)
++{
++ int ret;
++
++ ret = class_register(&wmi_class);
++ if (ret)
++ return ret;
++
++ ret = wmi_create_devs();
++ if (ret)
++ wmi_class_exit();
++
++ return ret;
++}
++
++/*
+ * Parse the _WDG method for the GUID data blocks
+ */
+ static __init acpi_status parse_wdg(acpi_handle handle)
+@@ -709,10 +871,17 @@ static int __init acpi_wmi_init(void)
+
+ if (result < 0) {
+ printk(KERN_INFO PREFIX "Error loading mapper\n");
+- } else {
+- printk(KERN_INFO PREFIX "Mapper loaded\n");
++ return -ENODEV;
++ }
++
++ result = wmi_class_init();
++ if (result) {
++ acpi_bus_unregister_driver(&acpi_wmi_driver);
++ return result;
+ }
+
++ printk(KERN_INFO PREFIX "Mapper loaded\n");
++
+ return result;
+ }
+
+@@ -721,6 +890,8 @@ static void __exit acpi_wmi_exit(void)
+ struct list_head *p, *tmp;
+ struct wmi_block *wblock;
+
++ wmi_class_exit();
++
+ acpi_bus_unregister_driver(&acpi_wmi_driver);
+
+ list_for_each_safe(p, tmp, &wmi_blocks.list) {
diff --git a/freed-ora/current/F-12/linux-2.6-b43_-Rewrite-DMA-Tx-status-handling-sanity-checks.patch b/freed-ora/current/F-12/linux-2.6-b43_-Rewrite-DMA-Tx-status-handling-sanity-checks.patch
new file mode 100644
index 000000000..441213ca0
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-b43_-Rewrite-DMA-Tx-status-handling-sanity-checks.patch
@@ -0,0 +1,182 @@
+Back-port of the following upstream commit...
+
+commit 07681e211d736ba2394ab7f29f77e93adecd22c5
+Author: Michael Buesch <mb@bu3sch.de>
+Date: Thu Nov 19 22:24:29 2009 +0100
+
+ b43: Rewrite DMA Tx status handling sanity checks
+
+ This rewrites the error handling policies in the TX status handler.
+ It tries to be error-tolerant as in "try hard to not crash the machine".
+ It won't recover from errors (that are bugs in the firmware or driver),
+ because that's impossible. However, it will return a more or less useful
+ error message and bail out. It also tries hard to use rate-limited messages
+ to not flood the syslog in case of a failure.
+
+ Signed-off-by: Michael Buesch <mb@bu3sch.de>
+ Signed-off-by: John W. Linville <linville@tuxdriver.com>
+
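Part of the error tolerance described above is a poison pointer: a distinct, never-dereferenceable sentinel stored in unused TX slots, so the status handler can tell "this slot was never used" apart from "the skb was legitimately handed off and cleared". The driver uses ERR_PTR(-ENOMEM) for this; the following is a minimal userspace sketch of the same idea, with hypothetical names:

    #include <stdio.h>

    /* Any value that can never be a valid skb pointer works as poison;
     * this constant is a stand-in for the driver's ERR_PTR(-ENOMEM). */
    #define SLOT_POISON ((void *)-12L)

    struct tx_slot { void *skb; };

    static int slot_is_poisoned(const struct tx_slot *s)
    {
            return s->skb == SLOT_POISON;
    }

    int main(void)
    {
            struct tx_slot ring[3];
            int i;

            for (i = 0; i < 3; i++)
                    ring[i].skb = SLOT_POISON;  /* freshly set up, never used */

            ring[0].skb = NULL;                 /* used; skb handed to the stack */

            printf("slot0 poisoned: %d, slot1 poisoned: %d\n",
                   slot_is_poisoned(&ring[0]), slot_is_poisoned(&ring[1]));
            return 0;
    }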
+diff -up linux-2.6.32.noarch/drivers/net/wireless/b43/dma.c.orig linux-2.6.32.noarch/drivers/net/wireless/b43/dma.c
+--- linux-2.6.32.noarch/drivers/net/wireless/b43/dma.c.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/net/wireless/b43/dma.c 2010-03-17 14:02:28.000000000 -0400
+@@ -770,7 +770,7 @@ static void free_all_descbuffers(struct
+ for (i = 0; i < ring->nr_slots; i++) {
+ desc = ring->ops->idx2desc(ring, i, &meta);
+
+- if (!meta->skb) {
++ if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
+ B43_WARN_ON(!ring->tx);
+ continue;
+ }
+@@ -822,7 +822,7 @@ struct b43_dmaring *b43_setup_dmaring(st
+ enum b43_dmatype type)
+ {
+ struct b43_dmaring *ring;
+- int err;
++ int i, err;
+ dma_addr_t dma_test;
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+@@ -837,6 +837,8 @@ struct b43_dmaring *b43_setup_dmaring(st
+ GFP_KERNEL);
+ if (!ring->meta)
+ goto err_kfree_ring;
++ for (i = 0; i < ring->nr_slots; i++)
++ ring->meta->skb = B43_DMA_PTR_POISON;
+
+ ring->type = type;
+ ring->dev = dev;
+@@ -1147,11 +1149,13 @@ struct b43_dmaring *parse_cookie(struct
+ case 0x5000:
+ ring = dma->tx_ring_mcast;
+ break;
+- default:
+- B43_WARN_ON(1);
+ }
+ *slot = (cookie & 0x0FFF);
+- B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
++ if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
++ b43dbg(dev->wl, "TX-status contains "
++ "invalid cookie: 0x%04X\n", cookie);
++ return NULL;
++ }
+
+ return ring;
+ }
+@@ -1400,19 +1404,40 @@ void b43_dma_handle_txstatus(struct b43_
+ struct b43_dmaring *ring;
+ struct b43_dmadesc_generic *desc;
+ struct b43_dmadesc_meta *meta;
+- int slot;
++ int slot, firstused;
+ bool frame_succeed;
+
+ ring = parse_cookie(dev, status->cookie, &slot);
+ if (unlikely(!ring))
+ return;
+-
+ B43_WARN_ON(!ring->tx);
++
++ /* Sanity check: TX packets are processed in-order on one ring.
++ * Check if the slot deduced from the cookie really is the first
++ * used slot. */
++ firstused = ring->current_slot - ring->used_slots + 1;
++ if (firstused < 0)
++ firstused = ring->nr_slots + firstused;
++ if (unlikely(slot != firstused)) {
++ /* This possibly is a firmware bug and will result in
++ * malfunction, memory leaks and/or stall of DMA functionality. */
++ b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
++ "Expected %d, but got %d\n",
++ ring->index, firstused, slot);
++ return;
++ }
++
+ ops = ring->ops;
+ while (1) {
+- B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
++ B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
+ desc = ops->idx2desc(ring, slot, &meta);
+
++ if (b43_dma_ptr_is_poisoned(meta->skb)) {
++ b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
++ "on ring %d\n",
++ slot, firstused, ring->index);
++ break;
++ }
+ if (meta->skb)
+ unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
+ 1);
+@@ -1423,7 +1448,14 @@ void b43_dma_handle_txstatus(struct b43_
+ if (meta->is_last_fragment) {
+ struct ieee80211_tx_info *info;
+
+- BUG_ON(!meta->skb);
++ if (unlikely(!meta->skb)) {
++ /* This is a scatter-gather fragment of a frame, so
++ * the skb pointer must not be NULL. */
++ b43dbg(dev->wl, "TX status unexpected NULL skb "
++ "at slot %d (first=%d) on ring %d\n",
++ slot, firstused, ring->index);
++ break;
++ }
+
+ info = IEEE80211_SKB_CB(meta->skb);
+
+@@ -1441,20 +1473,29 @@ void b43_dma_handle_txstatus(struct b43_
+ #endif /* DEBUG */
+ ieee80211_tx_status(dev->wl->hw, meta->skb);
+
+- /* skb is freed by ieee80211_tx_status() */
+- meta->skb = NULL;
++ /* skb will be freed by ieee80211_tx_status().
++ * Poison our pointer. */
++ meta->skb = B43_DMA_PTR_POISON;
+ } else {
+ /* No need to call free_descriptor_buffer here, as
+ * this is only the txhdr, which is not allocated.
+ */
+- B43_WARN_ON(meta->skb);
++ if (unlikely(meta->skb)) {
++ b43dbg(dev->wl, "TX status unexpected non-NULL skb "
++ "at slot %d (first=%d) on ring %d\n",
++ slot, firstused, ring->index);
++ break;
++ }
+ }
+
+ /* Everything unmapped and free'd. So it's not used anymore. */
+ ring->used_slots--;
+
+- if (meta->is_last_fragment)
++ if (meta->is_last_fragment) {
++ /* This is the last scatter-gather
++ * fragment of the frame. We are done. */
+ break;
++ }
+ slot = next_slot(ring, slot);
+ }
+ if (ring->stopped) {
+diff -up linux-2.6.32.noarch/drivers/net/wireless/b43/dma.h.orig linux-2.6.32.noarch/drivers/net/wireless/b43/dma.h
+--- linux-2.6.32.noarch/drivers/net/wireless/b43/dma.h.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/net/wireless/b43/dma.h 2010-03-17 13:57:57.000000000 -0400
+@@ -1,7 +1,7 @@
+ #ifndef B43_DMA_H_
+ #define B43_DMA_H_
+
+-#include <linux/ieee80211.h>
++#include <linux/err.h>
+
+ #include "b43.h"
+
+@@ -165,6 +165,10 @@ struct b43_dmadesc_generic {
+ #define B43_RXRING_SLOTS 64
+ #define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN
+
++/* Pointer poison */
++#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))
++#define b43_dma_ptr_is_poisoned(ptr) (unlikely((ptr) == B43_DMA_PTR_POISON))
++
+
+ struct sk_buff;
+ struct b43_private;
diff --git a/freed-ora/current/F-12/linux-2.6-block-silently-error-unsupported-empty-barriers-too.patch b/freed-ora/current/F-12/linux-2.6-block-silently-error-unsupported-empty-barriers-too.patch
new file mode 100644
index 000000000..e6ab893dd
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-block-silently-error-unsupported-empty-barriers-too.patch
@@ -0,0 +1,48 @@
+From: Mark McLoughlin <markmc@redhat.com>
+Subject: [PATCH] block: silently error unsupported empty barriers too
+
+With 2.6.31-rc5 in a KVM guest using dm and virtio_blk, we see the
+following errors:
+
+ end_request: I/O error, dev vda, sector 0
+ end_request: I/O error, dev vda, sector 0
+
+The errors go away if dm stops submitting empty barriers, by reverting:
+
+ commit 52b1fd5a27c625c78373e024bf570af3c9d44a79
+ Author: Mikulas Patocka <mpatocka@redhat.com>
+ dm: send empty barriers to targets in dm_flush
+
+We should error all barriers, even empty barriers, on devices like
+virtio_blk which don't support them.
+
+See also:
+
+ https://bugzilla.redhat.com/514901
+
+Signed-off-by: Mark McLoughlin <markmc@redhat.com>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Mikulas Patocka <mpatocka@redhat.com>
+Cc: Alasdair G Kergon <agk@redhat.com>
+Cc: Neil Brown <neilb@suse.de>
+---
+ block/blk-core.c | 3 +--
+ 1 files changed, 1 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index e3299a7..35ad2bb 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1163,8 +1163,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
+ const int unplug = bio_unplug(bio);
+ int rw_flags;
+
+- if (bio_barrier(bio) && bio_has_data(bio) &&
+- (q->next_ordered == QUEUE_ORDERED_NONE)) {
++ if (bio_barrier(bio) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
+--
+1.6.4
+
diff --git a/freed-ora/current/F-12/linux-2.6-btrfs-fix-acl.patch b/freed-ora/current/F-12/linux-2.6-btrfs-fix-acl.patch
new file mode 100644
index 000000000..3e015da42
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-btrfs-fix-acl.patch
@@ -0,0 +1,25 @@
+diff -up linux-2.6.32.noarch/fs/btrfs/acl.c.orig linux-2.6.32.noarch/fs/btrfs/acl.c
+--- linux-2.6.32.noarch/fs/btrfs/acl.c.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/fs/btrfs/acl.c 2010-01-14 15:36:25.926371944 -0500
+@@ -110,13 +110,15 @@ static int btrfs_set_acl(struct inode *i
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+- mode = inode->i_mode;
+- ret = posix_acl_equiv_mode(acl, &mode);
+- if (ret < 0)
+- return ret;
+- ret = 0;
+- inode->i_mode = mode;
+ name = POSIX_ACL_XATTR_ACCESS;
++ if (acl) {
++ mode = inode->i_mode;
++ ret = posix_acl_equiv_mode(acl, &mode);
++ if (ret < 0)
++ return ret;
++ ret = 0;
++ inode->i_mode = mode;
++ }
+ break;
+ case ACL_TYPE_DEFAULT:
+ if (!S_ISDIR(inode->i_mode))
diff --git a/freed-ora/current/F-12/linux-2.6-build-nonintconfig.patch b/freed-ora/current/F-12/linux-2.6-build-nonintconfig.patch
new file mode 100644
index 000000000..e88e0ea1e
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-build-nonintconfig.patch
@@ -0,0 +1,128 @@
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index 6d69c7c..ff84d12 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -58,6 +58,11 @@ localyesconfig: $(obj)/streamline_config.pl $(obj)/conf
+ fi
+ $(Q)rm -f .tmp.config
+
++nonint_oldconfig: $(obj)/conf
++ $< -b $(Kconfig)
++loose_nonint_oldconfig: $(obj)/conf
++ $< -B $(Kconfig)
++
+ # Create new linux.pot file
+ # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
+ # The symlink is used to repair a deficiency in arch/um
+diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
+index 9960d1c..ac8d455 100644
+--- a/scripts/kconfig/conf.c
++++ b/scripts/kconfig/conf.c
+@@ -23,6 +23,8 @@ enum {
+ ask_all,
+ ask_new,
+ ask_silent,
++ dont_ask,
++ dont_ask_dont_tell,
+ set_default,
+ set_yes,
+ set_mod,
+@@ -360,7 +362,10 @@ static void conf(struct menu *menu)
+
+ switch (prop->type) {
+ case P_MENU:
+- if (input_mode == ask_silent && rootEntry != menu) {
++ if ((input_mode == ask_silent ||
++ input_mode == dont_ask ||
++ input_mode == dont_ask_dont_tell) &&
++ rootEntry != menu) {
+ check_conf(menu);
+ return;
+ }
+@@ -406,6 +411,8 @@ conf_childs:
+ indent -= 2;
+ }
+
++static int return_value;
++
+ static void check_conf(struct menu *menu)
+ {
+ struct symbol *sym;
+@@ -418,12 +425,21 @@ static void check_conf(struct menu *menu)
+ if (sym && !sym_has_value(sym)) {
+ if (sym_is_changable(sym) ||
+ (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
++ if (input_mode == dont_ask ||
++ input_mode == dont_ask_dont_tell) {
++ if (input_mode == dont_ask &&
++ sym->name && !sym_is_choice_value(sym)) {
++ fprintf(stderr,"CONFIG_%s\n",sym->name);
++ ++return_value;
++ }
++ } else {
+ if (!conf_cnt++)
+ printf(_("*\n* Restart config...\n*\n"));
+ rootEntry = menu_get_parent_menu(menu);
+ conf(rootEntry);
+ }
+ }
++ }
+
+ for (child = menu->list; child; child = child->next)
+ check_conf(child);
+@@ -439,7 +455,7 @@ int main(int ac, char **av)
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) {
++ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) {
+ switch (opt) {
+ case 'o':
+ input_mode = ask_silent;
+@@ -448,6 +464,12 @@ int main(int ac, char **av)
+ input_mode = ask_silent;
+ sync_kconfig = 1;
+ break;
++ case 'b':
++ input_mode = dont_ask;
++ break;
++ case 'B':
++ input_mode = dont_ask_dont_tell;
++ break;
+ case 'd':
+ input_mode = set_default;
+ break;
+@@ -525,6 +547,8 @@ int main(int ac, char **av)
+ case ask_silent:
+ case ask_all:
+ case ask_new:
++ case dont_ask:
++ case dont_ask_dont_tell:
+ conf_read(NULL);
+ break;
+ case set_no:
+@@ -586,12 +610,16 @@ int main(int ac, char **av)
+ conf(&rootmenu);
+ input_mode = ask_silent;
+ /* fall through */
++ case dont_ask:
++ case dont_ask_dont_tell:
+ case ask_silent:
+ /* Update until a loop caused no more changes */
+ do {
+ conf_cnt = 0;
+ check_conf(&rootmenu);
+- } while (conf_cnt);
++ } while (conf_cnt &&
++ (input_mode != dont_ask &&
++ input_mode != dont_ask_dont_tell));
+ break;
+ }
+
+@@ -613,5 +641,5 @@ int main(int ac, char **av)
+ exit(1);
+ }
+ }
+- return 0;
++ return return_value;
+ }
diff --git a/freed-ora/current/F-12/linux-2.6-cantiga-iommu-gfx.patch b/freed-ora/current/F-12/linux-2.6-cantiga-iommu-gfx.patch
new file mode 100644
index 000000000..a18e38ba9
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-cantiga-iommu-gfx.patch
@@ -0,0 +1,26 @@
+diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
+index 4173125..baa32a0 100644
+--- a/drivers/pci/intel-iommu.c
++++ b/drivers/pci/intel-iommu.c
+@@ -340,7 +340,7 @@ int dmar_disabled = 0;
+ int dmar_disabled = 1;
+ #endif /*CONFIG_DMAR_DEFAULT_ON*/
+
+-static int __initdata dmar_map_gfx = 1;
++static int dmar_map_gfx = 1;
+ static int dmar_forcedac;
+ static int intel_iommu_strict;
+
+@@ -3728,6 +3728,12 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ */
+ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ rwbf_quirk = 1;
++
++ /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
++ if (dev->revision == 0x07) {
++ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
++ dmar_map_gfx = 0;
++ }
+ }
+
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
diff --git a/freed-ora/current/F-12/linux-2.6-compile-fixes.patch b/freed-ora/current/F-12/linux-2.6-compile-fixes.patch
new file mode 100644
index 000000000..34c08ce47
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-compile-fixes.patch
@@ -0,0 +1,6 @@
+#
+# Small compile fixes (For more involved fixes, please use a separate patch).
+#
+# Please add the errors from gcc before the diffs to save others having
+# to do a compile to figure out what your diff is fixing. Thanks.
+#
diff --git a/freed-ora/current/F-12/linux-2.6-crash-driver.patch b/freed-ora/current/F-12/linux-2.6-crash-driver.patch
new file mode 100644
index 000000000..5669f7aaf
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-crash-driver.patch
@@ -0,0 +1,363 @@
+diff --git a/arch/ia64/include/asm/crash.h b/arch/ia64/include/asm/crash.h
+new file mode 100644
+index 0000000..541af84
+--- /dev/null
++++ b/arch/ia64/include/asm/crash.h
+@@ -0,0 +1,90 @@
++#ifndef _ASM_IA64_CRASH_H
++#define _ASM_IA64_CRASH_H
++
++/*
++ * linux/include/asm-ia64/crash.h
++ *
++ * Copyright (c) 2004 Red Hat, Inc. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ */
++
++#ifdef __KERNEL__
++
++#include <linux/efi.h>
++#include <linux/mm.h>
++#include <asm/mmzone.h>
++
++static inline void *
++map_virtual(u64 offset, struct page **pp)
++{
++ struct page *page;
++ unsigned long pfn;
++ u32 type;
++
++ if (REGION_NUMBER(offset) == 5) {
++ char byte;
++
++ if (__get_user(byte, (char *)offset) == 0)
++ return (void *)offset;
++ else
++ return NULL;
++ }
++
++ switch (type = efi_mem_type(offset))
++ {
++ case EFI_LOADER_CODE:
++ case EFI_LOADER_DATA:
++ case EFI_BOOT_SERVICES_CODE:
++ case EFI_BOOT_SERVICES_DATA:
++ case EFI_CONVENTIONAL_MEMORY:
++ break;
++
++ default:
++ printk(KERN_INFO
++ "crash memory driver: invalid memory type for %lx: %d\n",
++ offset, type);
++ return NULL;
++ }
++
++ pfn = offset >> PAGE_SHIFT;
++
++ if (!pfn_valid(pfn)) {
++ printk(KERN_INFO
++ "crash memory driver: invalid pfn: %lx )\n", pfn);
++ return NULL;
++ }
++
++ page = pfn_to_page(pfn);
++
++ if (!page->virtual) {
++ printk(KERN_INFO
++ "crash memory driver: offset: %lx page: %lx page->virtual: NULL\n",
++ offset, (unsigned long)page);
++ return NULL;
++ }
++
++ return (page->virtual + (offset & (PAGE_SIZE-1)));
++}
++
++static inline void unmap_virtual(struct page *page)
++{
++ return;
++}
++
++#endif /* __KERNEL__ */
++
++#endif /* _ASM_IA64_CRASH_H */
+diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
+index 14d39e3..cf3d040 100644
+--- a/arch/ia64/kernel/ia64_ksyms.c
++++ b/arch/ia64/kernel/ia64_ksyms.c
+@@ -84,6 +84,9 @@ EXPORT_SYMBOL(ia64_save_scratch_fpregs);
+ #include <asm/unwind.h>
+ EXPORT_SYMBOL(unw_init_running);
+
++#include <linux/efi.h>
++EXPORT_SYMBOL_GPL(efi_mem_type);
++
+ #if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
+ extern void esi_call_phys (void);
+ EXPORT_SYMBOL_GPL(esi_call_phys);
+diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
+new file mode 100644
+index 0000000..dfcc006
+--- /dev/null
++++ b/arch/x86/include/asm/crash.h
+@@ -0,0 +1,75 @@
++#ifndef _ASM_I386_CRASH_H
++#define _ASM_I386_CRASH_H
++
++/*
++ * linux/include/asm-i386/crash.h
++ *
++ * Copyright (c) 2004 Red Hat, Inc. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ */
++
++#ifdef __KERNEL__
++
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <asm/mmzone.h>
++
++extern int page_is_ram(unsigned long);
++
++static inline void *
++map_virtual(u64 offset, struct page **pp)
++{
++ struct page *page;
++ unsigned long pfn;
++ void *vaddr;
++
++ pfn = (unsigned long)(offset >> PAGE_SHIFT);
++
++ if (!page_is_ram(pfn)) {
++ printk(KERN_INFO
++ "crash memory driver: !page_is_ram(pfn: %lx)\n", pfn);
++ return NULL;
++ }
++
++ if (!pfn_valid(pfn)) {
++ printk(KERN_INFO
++ "crash memory driver: invalid pfn: %lx )\n", pfn);
++ return NULL;
++ }
++
++ page = pfn_to_page(pfn);
++
++ vaddr = kmap(page);
++ if (!vaddr) {
++ printk(KERN_INFO
++ "crash memory driver: pfn: %lx kmap(page: %lx) failed\n",
++ pfn, (unsigned long)page);
++ return NULL;
++ }
++
++ *pp = page;
++ return (vaddr + (offset & (PAGE_SIZE-1)));
++}
++
++static inline void unmap_virtual(struct page *page)
++{
++ kunmap(page);
++}
++
++#endif /* __KERNEL__ */
++
++#endif /* _ASM_I386_CRASH_H */
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 334e63c..8e1ccbc 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -60,6 +60,7 @@ int page_is_ram(unsigned long pagenr)
+ }
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(page_is_ram);
+
+ /*
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 08a6f50..8bc5e9a 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -484,6 +484,8 @@ config LEGACY_PTYS
+ security. This option enables these legacy devices; on most
+ systems, it is safe to say N.
+
++config CRASH
++ tristate "Crash Utility memory driver"
+
+ config LEGACY_PTY_COUNT
+ int "Maximum number of legacy PTY in use"
+diff --git a/drivers/char/Makefile b/drivers/char/Makefile
+index 19a79dd..0bee860 100644
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -112,6 +112,8 @@ obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+ obj-$(CONFIG_JS_RTC) += js-rtc.o
+ js-rtc-y = rtc.o
+
++obj-$(CONFIG_CRASH) += crash.o
++
+ # Files generated that shall be removed upon make clean
+ clean-files := consolemap_deftbl.c defkeymap.c
+
+diff --git a/drivers/char/crash.c b/drivers/char/crash.c
+new file mode 100644
+index 0000000..e5437de
+--- /dev/null
++++ b/drivers/char/crash.c
+@@ -0,0 +1,128 @@
++/*
++ * linux/drivers/char/crash.c
++ *
++ * Copyright (C) 2004 Dave Anderson <anderson@redhat.com>
++ * Copyright (C) 2004 Red Hat, Inc.
++ */
++
++/******************************************************************************
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *****************************************************************************/
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/miscdevice.h>
++#include <linux/init.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <asm/types.h>
++#include <asm/crash.h>
++
++#define CRASH_VERSION "1.0"
++
++/*
++ * These are the file operation functions that allow crash utility
++ * access to physical memory.
++ */
++
++static loff_t
++crash_llseek(struct file * file, loff_t offset, int orig)
++{
++ switch (orig) {
++ case 0:
++ file->f_pos = offset;
++ return file->f_pos;
++ case 1:
++ file->f_pos += offset;
++ return file->f_pos;
++ default:
++ return -EINVAL;
++ }
++}
++
++/*
++ * Determine the page address for an address offset value,
++ * get a virtual address for it, and copy it out.
++ * Accesses must fit within a page.
++ */
++static ssize_t
++crash_read(struct file *file, char *buf, size_t count, loff_t *poff)
++{
++ void *vaddr;
++ struct page *page;
++ u64 offset;
++ ssize_t read;
++
++ offset = *poff;
++ if (offset >> PAGE_SHIFT != (offset+count-1) >> PAGE_SHIFT)
++ return -EINVAL;
++
++ vaddr = map_virtual(offset, &page);
++ if (!vaddr)
++ return -EFAULT;
++
++ if (copy_to_user(buf, vaddr, count)) {
++ unmap_virtual(page);
++ return -EFAULT;
++ }
++ unmap_virtual(page);
++
++ read = count;
++ *poff += read;
++ return read;
++}
++
++static struct file_operations crash_fops = {
++ .owner = THIS_MODULE,
++ .llseek = crash_llseek,
++ .read = crash_read,
++};
++
++static struct miscdevice crash_dev = {
++ MISC_DYNAMIC_MINOR,
++ "crash",
++ &crash_fops
++};
++
++static int __init
++crash_init(void)
++{
++ int ret;
++
++ ret = misc_register(&crash_dev);
++ if (ret) {
++ printk(KERN_ERR
++ "crash memory driver: cannot misc_register (MISC_DYNAMIC_MINOR)\n");
++ goto out;
++ }
++
++ ret = 0;
++ printk(KERN_INFO "crash memory driver: version %s\n", CRASH_VERSION);
++out:
++ return ret;
++}
++
++static void __exit
++crash_cleanup_module(void)
++{
++ misc_deregister(&crash_dev);
++}
++
++module_init(crash_init);
++module_exit(crash_cleanup_module);
++
++MODULE_LICENSE("GPL");
diff --git a/freed-ora/current/F-12/linux-2.6-debug-always-inline-kzalloc.patch b/freed-ora/current/F-12/linux-2.6-debug-always-inline-kzalloc.patch
new file mode 100644
index 000000000..24f665ca6
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-debug-always-inline-kzalloc.patch
@@ -0,0 +1,25 @@
+From 76ec0e2e6d6edf81abc0331d5e7873ef7b2f6019 Mon Sep 17 00:00:00 2001
+From: Kyle McMartin <kyle@phobos.i.jkkm.org>
+Date: Wed, 8 Jul 2009 13:06:01 -0400
+Subject: [PATCH 6/6] fedora: linux-2.6-debug-always-inline-kzalloc.patch
+
+---
+ include/linux/slab.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 2da8372..d4ef74f 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -310,7 +310,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+-static inline void *kzalloc(size_t size, gfp_t flags)
++static __always_inline void *kzalloc(size_t size, gfp_t flags)
+ {
+ return kmalloc(size, flags | __GFP_ZERO);
+ }
+--
+1.6.2.5
+
diff --git a/freed-ora/current/F-12/linux-2.6-debug-nmi-timeout.patch b/freed-ora/current/F-12/linux-2.6-debug-nmi-timeout.patch
new file mode 100644
index 000000000..447419c08
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-debug-nmi-timeout.patch
@@ -0,0 +1,45 @@
+From 899dd25ae272c73407c1477ec223982d0b57a668 Mon Sep 17 00:00:00 2001
+From: Kyle McMartin <kyle@phobos.i.jkkm.org>
+Date: Wed, 8 Jul 2009 13:03:06 -0400
+Subject: [PATCH 2/6] fedora: linux-2.6-debug-nmi-timeout.patch
+
+---
+ arch/x86/kernel/apic/nmi.c | 2 +-
+ lib/Kconfig.debug | 8 ++++++++
+ 2 files changed, 9 insertions(+), 1 deletions(-)
+
+diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
+index b3025b4..e82a450 100644
+--- a/arch/x86/kernel/apic/nmi.c
++++ b/arch/x86/kernel/apic/nmi.c
+@@ -436,7 +436,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
+ * wait a few IRQs (5 seconds) before doing the oops ...
+ */
+ local_inc(&__get_cpu_var(alert_counter));
+- if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
++ if (local_read(&__get_cpu_var(alert_counter)) == CONFIG_DEBUG_NMI_TIMEOUT * nmi_hz)
+ /*
+ * die_nmi will return ONLY if NOTIFY_STOP happens..
+ */
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 12327b2..2790b4f 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -245,6 +245,14 @@ config SCHEDSTATS
+ application, you can say N to avoid the very slight overhead
+ this adds.
+
++config DEBUG_NMI_TIMEOUT
++ int "Number of seconds before NMI timeout"
++ depends on X86
++ default 5
++ help
++ This value is the number of seconds the NMI watchdog will tick
++ before it decides the machine has hung.
++
+ config TIMER_STATS
+ bool "Collect kernel timers statistics"
+ depends on DEBUG_KERNEL && PROC_FS
+--
+1.6.2.5
+
diff --git a/freed-ora/current/F-12/linux-2.6-debug-sizeof-structs.patch b/freed-ora/current/F-12/linux-2.6-debug-sizeof-structs.patch
new file mode 100644
index 000000000..cc7747d1f
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-debug-sizeof-structs.patch
@@ -0,0 +1,31 @@
+diff --git a/init/main.c b/init/main.c
+index 7449819..98cfaae 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -369,6 +369,10 @@ static void __init setup_nr_cpu_ids(void)
+ nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
+ }
+
++#include <linux/ext3_fs_i.h>
++#include <linux/skbuff.h>
++#include <linux/sched.h>
++
+ /* Called by boot processor to activate the rest. */
+ static void __init smp_init(void)
+ {
+@@ -391,6 +395,15 @@ static void __init smp_init(void)
+ /* Any cleanup work */
+ printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
+ smp_cpus_done(setup_max_cpus);
++
++ printk(KERN_DEBUG "sizeof(vma)=%u bytes\n", (unsigned int) sizeof(struct vm_area_struct));
++ printk(KERN_DEBUG "sizeof(page)=%u bytes\n", (unsigned int) sizeof(struct page));
++ printk(KERN_DEBUG "sizeof(inode)=%u bytes\n", (unsigned int) sizeof(struct inode));
++ printk(KERN_DEBUG "sizeof(dentry)=%u bytes\n", (unsigned int) sizeof(struct dentry));
++ printk(KERN_DEBUG "sizeof(ext3inode)=%u bytes\n", (unsigned int) sizeof(struct ext3_inode_info));
++ printk(KERN_DEBUG "sizeof(buffer_head)=%u bytes\n", (unsigned int) sizeof(struct buffer_head));
++ printk(KERN_DEBUG "sizeof(skbuff)=%u bytes\n", (unsigned int) sizeof(struct sk_buff));
++ printk(KERN_DEBUG "sizeof(task_struct)=%u bytes\n", (unsigned int) sizeof(struct task_struct));
+ }
+
+ #endif
diff --git a/freed-ora/current/F-12/linux-2.6-debug-taint-vm.patch b/freed-ora/current/F-12/linux-2.6-debug-taint-vm.patch
new file mode 100644
index 000000000..ee367d45a
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-debug-taint-vm.patch
@@ -0,0 +1,65 @@
+From b04c57d9dc889462951312be2ac81ff6c702e954 Mon Sep 17 00:00:00 2001
+From: Kyle McMartin <kyle@phobos.i.jkkm.org>
+Date: Wed, 8 Jul 2009 13:05:09 -0400
+Subject: [PATCH 3/6] fedora: linux-2.6-debug-taint-vm.patch
+
+---
+ kernel/panic.c | 4 +++-
+ mm/slab.c | 8 ++++----
+ mm/slub.c | 2 +-
+ 4 files changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 984b3ec..6d1c3be 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -199,6 +199,7 @@ const char *print_tainted(void)
+
+ return buf;
+ }
++EXPORT_SYMBOL(print_tainted);
+
+ int test_taint(unsigned flag)
+ {
+diff --git a/mm/slab.c b/mm/slab.c
+index e74a16e..7bc287e 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1803,8 +1803,8 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
+ /* Print header */
+ if (lines == 0) {
+ printk(KERN_ERR
+- "Slab corruption: %s start=%p, len=%d\n",
+- cachep->name, realobj, size);
++ "Slab corruption (%s): %s start=%p, len=%d\n",
++ print_tainted(), cachep->name, realobj, size);
+ print_objinfo(cachep, objp, 0);
+ }
+ /* Hexdump the affected line */
+@@ -2902,8 +2902,8 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
+ if (entries != cachep->num - slabp->inuse) {
+ bad:
+ printk(KERN_ERR "slab: Internal list corruption detected in "
+- "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
+- cachep->name, cachep->num, slabp, slabp->inuse);
++ "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
++ cachep->name, cachep->num, slabp, slabp->inuse, print_tainted());
+ for (i = 0;
+ i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
+ i++) {
+diff --git a/mm/slub.c b/mm/slub.c
+index 819f056..8eff0f4 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -433,7 +433,7 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
+ va_end(args);
+ printk(KERN_ERR "========================================"
+ "=====================================\n");
+- printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
++ printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
+ printk(KERN_ERR "----------------------------------------"
+ "-------------------------------------\n\n");
+ }
+--
+1.6.2.5
+
diff --git a/freed-ora/current/F-12/linux-2.6-debug-vm-would-have-oomkilled.patch b/freed-ora/current/F-12/linux-2.6-debug-vm-would-have-oomkilled.patch
new file mode 100644
index 000000000..2fcd9b956
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-debug-vm-would-have-oomkilled.patch
@@ -0,0 +1,52 @@
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 0d949c5..8fb2bd9 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -72,6 +72,7 @@ extern int sysctl_overcommit_ratio;
+ extern int sysctl_panic_on_oom;
+ extern int sysctl_oom_kill_allocating_task;
+ extern int sysctl_oom_dump_tasks;
++extern int sysctl_would_have_oomkilled;
+ extern int max_threads;
+ extern int core_uses_pid;
+ extern int suid_dumpable;
+@@ -1073,6 +1074,14 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = &proc_dointvec,
+ },
+ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "would_have_oomkilled",
++ .data = &sysctl_would_have_oomkilled,
++ .maxlen = sizeof(sysctl_would_have_oomkilled),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec,
++ },
++ {
+ .ctl_name = VM_OVERCOMMIT_RATIO,
+ .procname = "overcommit_ratio",
+ .data = &sysctl_overcommit_ratio,
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index ea2147d..788fe84 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -31,6 +31,7 @@
+ int sysctl_panic_on_oom;
+ int sysctl_oom_kill_allocating_task;
+ int sysctl_oom_dump_tasks;
++int sysctl_would_have_oomkilled;
+ static DEFINE_SPINLOCK(zone_scan_lock);
+ /* #define DEBUG */
+
+@@ -356,6 +357,12 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
+ return;
+ }
+
++ if (sysctl_would_have_oomkilled == 1) {
++ printk(KERN_ERR "Would have killed process %d (%s). But continuing instead.\n",
++ task_pid_nr(p), p->comm);
++ return;
++ }
++
+ if (verbose)
+ printk(KERN_ERR "Killed process %d (%s)\n",
+ task_pid_nr(p), p->comm);
diff --git a/freed-ora/current/F-12/linux-2.6-defaults-acpi-video.patch b/freed-ora/current/F-12/linux-2.6-defaults-acpi-video.patch
new file mode 100644
index 000000000..af883b0d3
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-defaults-acpi-video.patch
@@ -0,0 +1,13 @@
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index d8d7596..a1b7117 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -71,7 +71,7 @@ MODULE_AUTHOR("Bruno Ducrot");
+ MODULE_DESCRIPTION("ACPI Video Driver");
+ MODULE_LICENSE("GPL");
+
+-static int brightness_switch_enabled = 1;
++static int brightness_switch_enabled = 0;
+ module_param(brightness_switch_enabled, bool, 0644);
+
+ static int acpi_video_bus_add(struct acpi_device *device);
diff --git a/freed-ora/current/F-12/linux-2.6-defaults-alsa-hda-beep-off.patch b/freed-ora/current/F-12/linux-2.6-defaults-alsa-hda-beep-off.patch
new file mode 100644
index 000000000..15c1af0c5
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-defaults-alsa-hda-beep-off.patch
@@ -0,0 +1,13 @@
+diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
+index 3ecd7e7..2762b40 100644
+--- a/sound/pci/hda/hda_beep.c
++++ b/sound/pci/hda/hda_beep.c
+@@ -122,7 +122,7 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
+ beep->nid = nid;
+ beep->dev = input_dev;
+ beep->codec = codec;
+- beep->enabled = 1;
++ beep->enabled = 0;
+ codec->beep = beep;
+
+ INIT_WORK(&beep->beep_work, &snd_hda_generate_beep);
diff --git a/freed-ora/current/F-12/linux-2.6-defaults-aspm.patch b/freed-ora/current/F-12/linux-2.6-defaults-aspm.patch
new file mode 100644
index 000000000..49b832d2c
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-defaults-aspm.patch
@@ -0,0 +1,12 @@
+diff -up linux-2.6.30.noarch/drivers/pci/pcie/aspm.c.mjg linux-2.6.30.noarch/drivers/pci/pcie/aspm.c
+--- linux-2.6.30.noarch/drivers/pci/pcie/aspm.c.mjg 2009-07-16 22:01:11.000000000 +0100
++++ linux-2.6.30.noarch/drivers/pci/pcie/aspm.c 2009-07-16 22:01:30.000000000 +0100
+@@ -65,7 +65,7 @@ static LIST_HEAD(link_list);
+ #define POLICY_DEFAULT 0 /* BIOS default setting */
+ #define POLICY_PERFORMANCE 1 /* high performance */
+ #define POLICY_POWERSAVE 2 /* high power saving */
+-static int aspm_policy;
++static int aspm_policy = POLICY_POWERSAVE;
+ static const char *policy_str[] = {
+ [POLICY_DEFAULT] = "default",
+ [POLICY_PERFORMANCE] = "performance",
diff --git a/freed-ora/current/F-12/linux-2.6-defaults-pci_no_msi.patch b/freed-ora/current/F-12/linux-2.6-defaults-pci_no_msi.patch
new file mode 100644
index 000000000..fad6a53b2
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-defaults-pci_no_msi.patch
@@ -0,0 +1,92 @@
+--- linux-2.6.30.noarch/Documentation/kernel-parameters.txt~ 2009-06-24 14:25:04.000000000 -0400
++++ linux-2.6.30.noarch/Documentation/kernel-parameters.txt 2009-06-24 14:25:32.000000000 -0400
+@@ -1811,6 +1811,9 @@ and is between 256 and 4096 characters.
+ check_enable_amd_mmconf [X86] check for and enable
+ properly configured MMIO access to PCI
+ config space on AMD family 10h CPU
++ msi [MSI] If the PCI_MSI kernel config parameter is
++ enabled, this kernel boot option can be used to
++ enable the use of MSI interrupts system-wide.
+ nomsi [MSI] If the PCI_MSI kernel config parameter is
+ enabled, this kernel boot option can be used to
+ disable the use of MSI interrupts system-wide.
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 2a4501d..209758c 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -21,6 +21,18 @@ config PCI_MSI
+
+ If you don't know what to do here, say N.
+
++config PCI_MSI_DEFAULT_ON
++ def_bool y
++ prompt "Use Message Signaled Interrupts by default"
++ depends on PCI_MSI
++ help
++ Selecting this option will enable use of PCI MSI where applicable
++ by default. Support for MSI can be disabled through the use of the
++ pci=nomsi boot flag. Conversely, if this option is not selected,
++ support for PCI MSI can be enabled by passing the pci=msi flag.
++
++ If you don't know what to do here, say N.
++
+ config PCI_LEGACY
+ bool "Enable deprecated pci_find_* API"
+ depends on PCI
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index 896a15d..53df583 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -23,7 +23,11 @@
+ #include "pci.h"
+ #include "msi.h"
+
++#ifdef CONFIG_PCI_MSI_DEFAULT_ON
+ static int pci_msi_enable = 1;
++#else
++static int pci_msi_enable = 0;
++#endif /*CONFIG_PCI_MSI_DEFAULT_ON*/
+
+ /* Arch hooks */
+
+@@ -786,6 +790,11 @@ int pci_msi_enabled(void)
+ }
+ EXPORT_SYMBOL(pci_msi_enabled);
+
++void pci_yes_msi(void)
++{
++ pci_msi_enable = 1;
++}
++
+ void pci_msi_init_pci_dev(struct pci_dev *dev)
+ {
+ INIT_LIST_HEAD(&dev->msi_list);
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 17bd932..e9bc9fe 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2393,6 +2393,8 @@ static int __init pci_setup(char *str)
+ if (*str && (str = pcibios_setup(str)) && *str) {
+ if (!strcmp(str, "nomsi")) {
+ pci_no_msi();
++ } else if (!strcmp(str, "msi")) {
++ pci_yes_msi();
+ } else if (!strcmp(str, "noaer")) {
+ pci_no_aer();
+ } else if (!strcmp(str, "nodomains")) {
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 26ddf78..85efe81 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -111,9 +111,11 @@ extern unsigned int pci_pm_d3_delay;
+
+ #ifdef CONFIG_PCI_MSI
+ void pci_no_msi(void);
++void pci_yes_msi(void);
+ extern void pci_msi_init_pci_dev(struct pci_dev *dev);
+ #else
+ static inline void pci_no_msi(void) { }
++static inline void pci_yes_msi(void) { }
+ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
+ #endif
+
diff --git a/freed-ora/current/F-12/linux-2.6-defaults-pciehp.patch b/freed-ora/current/F-12/linux-2.6-defaults-pciehp.patch
new file mode 100644
index 000000000..07f2670c7
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-defaults-pciehp.patch
@@ -0,0 +1,13 @@
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index e7f3c9e..4f2b037 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -41,7 +41,7 @@ int pciehp_debug;
+ int pciehp_poll_mode;
+ int pciehp_poll_time;
+ int pciehp_force;
+-int pciehp_passive;
++int pciehp_passive=1;
+ struct workqueue_struct *pciehp_wq;
+
+ #define DRIVER_VERSION "0.4"
diff --git a/freed-ora/current/F-12/linux-2.6-dell-laptop-rfkill-fix.patch b/freed-ora/current/F-12/linux-2.6-dell-laptop-rfkill-fix.patch
new file mode 100644
index 000000000..336788c97
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-dell-laptop-rfkill-fix.patch
@@ -0,0 +1,323 @@
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 7c237e6..80f1e48 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -88,19 +88,26 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
+ */
+ static void input_pass_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
+-{
+- struct input_handle *handle;
++
++{ struct input_handle *handle;
+
+ rcu_read_lock();
+
+ handle = rcu_dereference(dev->grab);
+- if (handle)
++ if (handle) {
+ handle->handler->event(handle, type, code, value);
+- else
+- list_for_each_entry_rcu(handle, &dev->h_list, d_node)
+- if (handle->open)
+- handle->handler->event(handle,
+- type, code, value);
++ goto out;
++ }
++
++ handle = rcu_dereference(dev->filter);
++ if (handle && handle->handler->filter(handle, type, code, value))
++ goto out;
++
++ list_for_each_entry_rcu(handle, &dev->h_list, d_node)
++ if (handle->open)
++ handle->handler->event(handle,
++ type, code, value);
++out:
+ rcu_read_unlock();
+ }
+
+@@ -375,12 +382,15 @@ int input_grab_device(struct input_handle *handle)
+ }
+ EXPORT_SYMBOL(input_grab_device);
+
+-static void __input_release_device(struct input_handle *handle)
++static void __input_release_device(struct input_handle *handle, bool filter)
+ {
+ struct input_dev *dev = handle->dev;
+
+- if (dev->grab == handle) {
+- rcu_assign_pointer(dev->grab, NULL);
++ if (handle == (filter ? dev->filter : dev->grab)) {
++ if (filter)
++ rcu_assign_pointer(dev->filter, NULL);
++ else
++ rcu_assign_pointer(dev->grab, NULL);
+ /* Make sure input_pass_event() notices that grab is gone */
+ synchronize_rcu();
+
+@@ -404,12 +414,65 @@ void input_release_device(struct input_handle *handle)
+ struct input_dev *dev = handle->dev;
+
+ mutex_lock(&dev->mutex);
+- __input_release_device(handle);
++ __input_release_device(handle, false);
+ mutex_unlock(&dev->mutex);
+ }
+ EXPORT_SYMBOL(input_release_device);
+
+ /**
++ * input_filter_device - allow input events to be filtered from higher layers
++ * @handle: input handle that wants to filter the device
++ *
++ * When a device is filtered by an input handle all events generated by
++ * the device are passed to this handle. If the filter function returns true then
++ * the event is discarded rather than being passed to any other input handles,
++ * otherwise it is passed to them as normal. Grabs will be handled before
++ * filters, so a grabbed device will not deliver events to a filter function.
++ */
++int input_filter_device(struct input_handle *handle)
++{
++ struct input_dev *dev = handle->dev;
++ int retval;
++
++ retval = mutex_lock_interruptible(&dev->mutex);
++ if (retval)
++ return retval;
++
++ if (dev->filter) {
++ retval = -EBUSY;
++ goto out;
++ }
++
++ rcu_assign_pointer(dev->filter, handle);
++ synchronize_rcu();
++
++ out:
++ mutex_unlock(&dev->mutex);
++ return retval;
++}
++EXPORT_SYMBOL(input_filter_device);
++
++/**
++ * input_unfilter_device - removes a filter from a device
++ * @handle: input handle that owns the device
++ *
++ * Removes the filter from a device so that other input handles can
++ * start receiving unfiltered input events. Upon release all handlers
++ * attached to the device have their start() method called so they
++ * have a chance to synchronize device state with the rest of the
++ * system.
++ */
++void input_unfilter_device(struct input_handle *handle)
++{
++ struct input_dev *dev = handle->dev;
++
++ mutex_lock(&dev->mutex);
++ __input_release_device(handle, true);
++ mutex_unlock(&dev->mutex);
++}
++EXPORT_SYMBOL(input_unfilter_device);
++
++/**
+ * input_open_device - open input device
+ * @handle: handle through which device is being accessed
+ *
+@@ -482,7 +545,9 @@ void input_close_device(struct input_handle *handle)
+
+ mutex_lock(&dev->mutex);
+
+- __input_release_device(handle);
++ /* Release both grabs and filters */
++ __input_release_device(handle, false);
++ __input_release_device(handle, true);
+
+ if (!--dev->users && dev->close)
+ dev->close(dev);
+diff --git a/include/linux/input.h b/include/linux/input.h
+index 8b3bc3e..e28f116 100644
+--- a/include/linux/input.h
++++ b/include/linux/input.h
+@@ -1118,6 +1118,7 @@ struct input_dev {
+ int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
+
+ struct input_handle *grab;
++ struct input_handle *filter;
+
+ spinlock_t event_lock;
+ struct mutex mutex;
+@@ -1218,6 +1219,7 @@ struct input_handler {
+ void *private;
+
+ void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
++ bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
+ int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id);
+ void (*disconnect)(struct input_handle *handle);
+ void (*start)(struct input_handle *handle);
+@@ -1295,6 +1297,9 @@ void input_unregister_handle(struct input_handle *);
+ int input_grab_device(struct input_handle *);
+ void input_release_device(struct input_handle *);
+
++int input_filter_device(struct input_handle *);
++void input_unfilter_device(struct input_handle *);
++
+ int input_open_device(struct input_handle *);
+ void input_close_device(struct input_handle *);
+
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index 74909c4..71a4149 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -22,6 +22,7 @@
+ #include <linux/rfkill.h>
+ #include <linux/power_supply.h>
+ #include <linux/acpi.h>
++#include <linux/input.h>
+ #include "../../firmware/dcdbas.h"
+
+ #define BRIGHTNESS_TOKEN 0x7d
+@@ -206,6 +207,16 @@ static const struct rfkill_ops dell_rfkill_ops = {
+ .query = dell_rfkill_query,
+ };
+
++static void dell_rfkill_update(void)
++{
++ if (wifi_rfkill)
++ dell_rfkill_query(wifi_rfkill, (void *)1);
++ if (bluetooth_rfkill)
++ dell_rfkill_query(bluetooth_rfkill, (void *)2);
++ if (wwan_rfkill)
++ dell_rfkill_query(wwan_rfkill, (void *)3);
++}
++
+ static int dell_setup_rfkill(void)
+ {
+ struct calling_interface_buffer buffer;
+@@ -310,6 +321,90 @@ static struct backlight_ops dell_ops = {
+ .update_status = dell_send_intensity,
+ };
+
++static const struct input_device_id dell_input_ids[] = {
++ {
++ .bustype = 0x11,
++ .vendor = 0x01,
++ .product = 0x01,
++ .version = 0xab41,
++ .flags = INPUT_DEVICE_ID_MATCH_BUS |
++ INPUT_DEVICE_ID_MATCH_VENDOR |
++ INPUT_DEVICE_ID_MATCH_PRODUCT |
++ INPUT_DEVICE_ID_MATCH_VERSION
++ },
++ { },
++};
++
++static bool dell_input_filter(struct input_handle *handle, unsigned int type,
++ unsigned int code, int value)
++{
++ if (type == EV_KEY && code == KEY_WLAN && value == 1) {
++ dell_rfkill_update();
++ return 1;
++ }
++
++ return 0;
++}
++
++static void dell_input_event(struct input_handle *handle, unsigned int type,
++ unsigned int code, int value)
++{
++}
++
++static int dell_input_connect(struct input_handler *handler,
++ struct input_dev *dev,
++ const struct input_device_id *id)
++{
++ struct input_handle *handle;
++ int error;
++
++ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
++ if (!handle)
++ return -ENOMEM;
++
++ handle->dev = dev;
++ handle->handler = handler;
++ handle->name = "dell-laptop";
++
++ error = input_register_handle(handle);
++ if (error)
++ goto err_free_handle;
++
++ error = input_open_device(handle);
++ if (error)
++ goto err_unregister_handle;
++
++ error = input_filter_device(handle);
++ if (error)
++ goto err_close_handle;
++
++ return 0;
++
++err_close_handle:
++ input_close_device(handle);
++err_unregister_handle:
++ input_unregister_handle(handle);
++err_free_handle:
++ kfree(handle);
++ return error;
++}
++
++static void dell_input_disconnect(struct input_handle *handle)
++{
++ input_close_device(handle);
++ input_unregister_handle(handle);
++ kfree(handle);
++}
++
++static struct input_handler dell_input_handler = {
++ .name = "dell-laptop",
++ .filter = dell_input_filter,
++ .event = dell_input_event,
++ .connect = dell_input_connect,
++ .disconnect = dell_input_disconnect,
++ .id_table = dell_input_ids,
++};
++
+ static int __init dell_init(void)
+ {
+ struct calling_interface_buffer buffer;
+@@ -333,6 +428,10 @@ static int __init dell_init(void)
+ goto out;
+ }
+
++ if (input_register_handler(&dell_input_handler))
++ printk(KERN_INFO
++ "dell-laptop: Could not register input filter\n");
++
+ #ifdef CONFIG_ACPI
+ /* In the event of an ACPI backlight being available, don't
+ * register the platform controller.
+@@ -388,6 +487,7 @@ static void __exit dell_exit(void)
+ rfkill_unregister(bluetooth_rfkill);
+ if (wwan_rfkill)
+ rfkill_unregister(wwan_rfkill);
++ input_unregister_handler(&dell_input_handler);
+ }
+
+ module_init(dell_init);
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index 71a4149..e559fa1 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -198,8 +198,8 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
+ dell_send_request(&buffer, 17, 11);
+ status = buffer.output[1];
+
+- if (status & BIT(bit))
+- rfkill_set_hw_state(rfkill, !!(status & BIT(16)));
++ rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
++ rfkill_set_hw_state(rfkill, !(status & BIT(16)));
+ }
+
+ static const struct rfkill_ops dell_rfkill_ops = {
+--
+1.6.3.3
+
diff --git a/freed-ora/current/F-12/linux-2.6-driver-level-usb-autosuspend.diff b/freed-ora/current/F-12/linux-2.6-driver-level-usb-autosuspend.diff
new file mode 100644
index 000000000..2632af455
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-driver-level-usb-autosuspend.diff
@@ -0,0 +1,62 @@
+commit 7d0d20a25c6f477fb198b85510c78156d7d7c5af
+Author: Matthew Garrett <mjg@redhat.com>
+Date: Tue Jun 9 20:11:47 2009 +0100
+
+ usb: Allow drivers to enable USB autosuspend on a per-device basis
+
+ USB autosuspend is currently only enabled by default for hubs. On other
+ hardware the decision is made by userspace. This is unnecessary in cases
+ where we know that the hardware supports autosuspend, so this patch adds
+ a function to allow drivers to enable it at probe time.
+
+ Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 4f86447..f7caf00 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1575,6 +1575,22 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
+ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
+
+ /**
++ * usb_device_autosuspend_enable - enable autosuspend on a device
++ * @udev: the usb_device to be autosuspended
++ *
++ * This routine should be called by an interface driver when it knows that
++ * the device in question supports USB autosuspend.
++ *
++ */
++void usb_device_autosuspend_enable(struct usb_device *udev)
++{
++ udev->autosuspend_disabled = 0;
++ udev->autoresume_disabled = 0;
++ usb_external_suspend_device(udev, PMSG_USER_SUSPEND);
++}
++EXPORT_SYMBOL_GPL(usb_device_autosuspend_enable);
++
++/**
+ * usb_autopm_get_interface - increment a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be incremented
+ *
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index a34fa89..0c22c64 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -543,6 +543,7 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
+
+ /* USB autosuspend and autoresume */
+ #ifdef CONFIG_USB_SUSPEND
++extern void usb_device_autosuspend_enable(struct usb_device *udev);
+ extern int usb_autopm_set_interface(struct usb_interface *intf);
+ extern int usb_autopm_get_interface(struct usb_interface *intf);
+ extern void usb_autopm_put_interface(struct usb_interface *intf);
+@@ -568,6 +569,9 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
+
+ #else
+
++static inline void usb_device_autosuspend_enable(struct usb_device *udev)
++{ }
++
+ static inline int usb_autopm_set_interface(struct usb_interface *intf)
+ { return 0; }
+
diff --git a/freed-ora/current/F-12/linux-2.6-e1000-ich9.patch b/freed-ora/current/F-12/linux-2.6-e1000-ich9.patch
new file mode 100644
index 000000000..5a3391cde
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-e1000-ich9.patch
@@ -0,0 +1,27 @@
+diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
+index 8f8139d..22c49ec 100644
+--- a/drivers/net/e1000e/ich8lan.c
++++ b/drivers/net/e1000e/ich8lan.c
+@@ -209,6 +209,12 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+
+ /* Verify phy id */
+ switch (phy->id) {
++ case 0x0:
++ if (hw->adapter->pdev->device == 0x10be)
++ hw_dbg(hw, "got 0 phy id, trying anyway");
++ /* Fall through to IGP03E1000 case below */
++ else
++ return -E1000_ERR_PHY;
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+--- linux-2.6.25.noarch/drivers/net/e1000e/netdev.c~ 2008-05-16 10:31:41.000000000 -0400
++++ linux-2.6.25.noarch/drivers/net/e1000e/netdev.c 2008-05-16 10:32:43.000000000 -0400
+@@ -4599,6 +4599,7 @@ static struct pci_device_id e1000_pci_tb
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
++ { PCI_VDEVICE(INTEL, 0x10be), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
diff --git a/freed-ora/current/F-12/linux-2.6-enable-btusb-autosuspend.patch b/freed-ora/current/F-12/linux-2.6-enable-btusb-autosuspend.patch
new file mode 100644
index 000000000..7e75341bb
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-enable-btusb-autosuspend.patch
@@ -0,0 +1,18 @@
+commit 8e962bd41a2cbf7f0e55191a757b87f793a725a8
+Author: Matthew Garrett <mjg@redhat.com>
+Date: Tue Jun 9 20:47:51 2009 +0100
+
+ btusb: Enable autosuspend by default
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 44bc8bb..4c33417 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1020,6 +1020,7 @@ static int btusb_probe(struct usb_interface *intf,
+ }
+
+ usb_set_intfdata(intf, data);
++ usb_device_autosuspend_enable(data->udev);
+
+ return 0;
+ }
diff --git a/freed-ora/current/F-12/linux-2.6-execshield.patch b/freed-ora/current/F-12/linux-2.6-execshield.patch
new file mode 100644
index 000000000..61e444f64
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-execshield.patch
@@ -0,0 +1,987 @@
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index e8de2f6..538c2b6 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -5,6 +5,7 @@
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
+ #include <linux/smp.h>
++#include <linux/mm_types.h>
+
+ static inline void fill_ldt(struct desc_struct *desc,
+ const struct user_desc *info)
+@@ -93,6 +94,9 @@ static inline int desc_empty(const void *ptr)
+
+ #define load_TLS(t, cpu) native_load_tls(t, cpu)
+ #define set_ldt native_set_ldt
++#ifdef CONFIG_X86_32
++#define load_user_cs_desc native_load_user_cs_desc
++#endif /*CONFIG_X86_32*/
+
+ #define write_ldt_entry(dt, entry, desc) \
+ native_write_ldt_entry(dt, entry, desc)
+@@ -392,4 +396,25 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
++{
++ limit = (limit - 1) / PAGE_SIZE;
++ desc->a = limit & 0xffff;
++ desc->b = (limit & 0xf0000) | 0x00c0fb00;
++}
++
++static inline void native_load_user_cs_desc(int cpu, struct mm_struct *mm)
++{
++ get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs;
++}
++
++#define arch_add_exec_range arch_add_exec_range
++#define arch_remove_exec_range arch_remove_exec_range
++#define arch_flush_exec_range arch_flush_exec_range
++extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
++extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
++extern void arch_flush_exec_range(struct mm_struct *mm);
++#endif /* CONFIG_X86_32 */
++
+ #endif /* _ASM_X86_DESC_H */
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 80a1dee..8314c66 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -7,12 +7,19 @@
+ /*
+ * The x86 doesn't have a mmu context, but
+ * we put the segment information here.
++ *
++ * exec_limit is used to track the range PROT_EXEC
++ * mappings span.
+ */
+ typedef struct {
+ void *ldt;
+ int size;
+ struct mutex lock;
+ void *vdso;
++#ifdef CONFIG_X86_32
++ struct desc_struct user_cs;
++ unsigned long exec_limit;
++#endif
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 8aebcc4..cbbd2b0 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -289,6 +289,12 @@ static inline void set_ldt(const void *addr, unsigned entries)
+ {
+ PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
+ }
++#ifdef CONFIG_X86_32
++static inline void load_user_cs_desc(unsigned int cpu, struct mm_struct *mm)
++{
++ PVOP_VCALL2(pv_cpu_ops.load_user_cs_desc, cpu, mm);
++}
++#endif /*CONFIG_X86_32*/
+ static inline void store_gdt(struct desc_ptr *dtr)
+ {
+ PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index dd0f5b3..c2727ef 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -118,6 +118,9 @@ struct pv_cpu_ops {
+ void (*store_gdt)(struct desc_ptr *);
+ void (*store_idt)(struct desc_ptr *);
+ void (*set_ldt)(const void *desc, unsigned entries);
++#ifdef CONFIG_X86_32
++ void (*load_user_cs_desc)(int cpu, struct mm_struct *mm);
++#endif
+ unsigned long (*store_tr)(void);
+ void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+ #ifdef CONFIG_X86_64
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index c3429e8..62cc460 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -161,6 +161,9 @@ static inline int hlt_works(int cpu)
+
+ #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
++#define __HAVE_ARCH_ALIGN_STACK
++extern unsigned long arch_align_stack(unsigned long sp);
++
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+
+ extern struct pt_regs *idle_regs(struct pt_regs *);
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index cc25c2b..6ce4863 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -798,6 +798,20 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++ /*
++ * emulation of NX with segment limits unfortunately means
++ * we have to disable the fast system calls, due to the way that
++ * sysexit clears the segment limits on return.
++ * If we have either disabled exec-shield on the boot command line,
++ * or we have NX, then we don't need to do this.
++ */
++ if (exec_shield != 0) {
++#ifdef CONFIG_X86_PAE
++ if (!test_cpu_cap(c, X86_FEATURE_NX))
++#endif
++ clear_cpu_cap(c, X86_FEATURE_SEP);
++ }
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 1b1739d..c2dda16 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -345,6 +345,9 @@ struct pv_cpu_ops pv_cpu_ops = {
+ .read_tscp = native_read_tscp,
+ .load_tr_desc = native_load_tr_desc,
+ .set_ldt = native_set_ldt,
++#ifdef CONFIG_X86_32
++ .load_user_cs_desc = native_load_user_cs_desc,
++#endif /*CONFIG_X86_32*/
+ .load_gdt = native_load_gdt,
+ .load_idt = native_load_idt,
+ .store_gdt = native_store_gdt,
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 4cf7956..b2407dc 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -296,7 +296,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+ void
+ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ {
++ int cpu;
++
+ set_user_gs(regs, 0);
++
+ regs->fs = 0;
+ set_fs(USER_DS);
+ regs->ds = __USER_DS;
+@@ -305,6 +308,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ regs->cs = __USER_CS;
+ regs->ip = new_ip;
+ regs->sp = new_sp;
++
++ cpu = get_cpu();
++ load_user_cs_desc(cpu, current->mm);
++ put_cpu();
++
+ /*
+ * Free the old FP and other extended state
+ */
+@@ -364,6 +372,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ if (preload_fpu)
+ prefetch(next->xstate);
+
++ if (next_p->mm)
++ load_user_cs_desc(cpu, next_p->mm);
++
+ /*
+ * Reload esp0.
+ */
+@@ -497,3 +508,40 @@ unsigned long get_wchan(struct task_struct *p)
+ return 0;
+ }
+
++static void modify_cs(struct mm_struct *mm, unsigned long limit)
++{
++ mm->context.exec_limit = limit;
++ set_user_cs(&mm->context.user_cs, limit);
++ if (mm == current->mm) {
++ int cpu;
++
++ cpu = get_cpu();
++ load_user_cs_desc(cpu, mm);
++ put_cpu();
++ }
++}
++
++void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
++{
++ if (limit > mm->context.exec_limit)
++ modify_cs(mm, limit);
++}
++
++void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
++{
++ struct vm_area_struct *vma;
++ unsigned long limit = PAGE_SIZE;
++
++ if (old_end == mm->context.exec_limit) {
++ for (vma = mm->mmap; vma; vma = vma->vm_next)
++ if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
++ limit = vma->vm_end;
++ modify_cs(mm, limit);
++ }
++}
++
++void arch_flush_exec_range(struct mm_struct *mm)
++{
++ mm->context.exec_limit = 0;
++ set_user_cs(&mm->context.user_cs, 0);
++}
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 7e37dce..92ae538 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -115,6 +115,76 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
+ if (!user_mode_vm(regs))
+ die(str, regs, err);
+ }
++
++static inline int
++__compare_user_cs_desc(const struct desc_struct *desc1,
++ const struct desc_struct *desc2)
++{
++ return ((desc1->limit0 != desc2->limit0) ||
++ (desc1->limit != desc2->limit) ||
++ (desc1->base0 != desc2->base0) ||
++ (desc1->base1 != desc2->base1) ||
++ (desc1->base2 != desc2->base2));
++}
++
++/*
++ * lazy-check for CS validity on exec-shield binaries:
++ *
++ * the original non-exec stack patch was written by
++ * Solar Designer <solar at openwall.com>. Thanks!
++ */
++static int
++check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
++{
++ struct desc_struct *desc1, *desc2;
++ struct vm_area_struct *vma;
++ unsigned long limit;
++
++ if (current->mm == NULL)
++ return 0;
++
++ limit = -1UL;
++ if (current->mm->context.exec_limit != -1UL) {
++ limit = PAGE_SIZE;
++ spin_lock(&current->mm->page_table_lock);
++ for (vma = current->mm->mmap; vma; vma = vma->vm_next)
++ if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
++ limit = vma->vm_end;
++ vma = get_gate_vma(current);
++ if (vma && (vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
++ limit = vma->vm_end;
++ spin_unlock(&current->mm->page_table_lock);
++ if (limit >= TASK_SIZE)
++ limit = -1UL;
++ current->mm->context.exec_limit = limit;
++ }
++ set_user_cs(&current->mm->context.user_cs, limit);
++
++ desc1 = &current->mm->context.user_cs;
++ desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;
++
++ if (__compare_user_cs_desc(desc1, desc2)) {
++ /*
++ * The CS was not in sync - reload it and retry the
++ * instruction. If the instruction still faults then
++ * we won't hit this branch next time around.
++ */
++ if (print_fatal_signals >= 2) {
++ printk(KERN_ERR "#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
++ error_code, error_code/8, regs->ip,
++ smp_processor_id());
++ printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x, CPU_cs: %08x/%08x.\n",
++ current->mm->context.exec_limit,
++ desc1->a, desc1->b, desc2->a, desc2->b);
++ }
++
++ load_user_cs_desc(cpu, current->mm);
++
++ return 1;
++ }
++
++ return 0;
++}
+ #endif
+
+ static void __kprobes
+@@ -273,6 +343,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
+ if (!user_mode(regs))
+ goto gp_in_kernel;
+
++#ifdef CONFIG_X86_32
++{
++ int cpu;
++ int ok;
++
++ cpu = get_cpu();
++ ok = check_lazy_exec_limit(cpu, regs, error_code);
++ put_cpu();
++
++ if (ok)
++ return;
++
++ if (print_fatal_signals) {
++ printk(KERN_ERR "#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n",
++ error_code, error_code/8, regs->ip, smp_processor_id());
++ printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x.\n",
++ current->mm->context.exec_limit,
++ current->mm->context.user_cs.a,
++ current->mm->context.user_cs.b);
++ }
++}
++#endif /*CONFIG_X86_32*/
++
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+
+@@ -881,19 +974,37 @@ do_device_not_available(struct pt_regs *regs, long error_code)
+ }
+
+ #ifdef CONFIG_X86_32
++/*
++ * The fixup code for errors in iret jumps to here (iret_exc). It loses
++ * the original trap number and error code. The bogus trap 32 and error
++ * code 0 are what the vanilla kernel delivers via:
++ * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
++ *
++ * NOTE: Because of the final "1" in the macro we need to enable interrupts.
++ *
++ * In case of a general protection fault in the iret instruction, we
++ * need to check for a lazy CS update for exec-shield.
++ */
+ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
+ {
+- siginfo_t info;
++ int ok;
++ int cpu;
++
+ local_irq_enable();
+
+- info.si_signo = SIGILL;
+- info.si_errno = 0;
+- info.si_code = ILL_BADSTK;
+- info.si_addr = NULL;
+- if (notify_die(DIE_TRAP, "iret exception",
+- regs, error_code, 32, SIGILL) == NOTIFY_STOP)
+- return;
+- do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
++ cpu = get_cpu();
++ ok = check_lazy_exec_limit(cpu, regs, error_code);
++ put_cpu();
++
++ if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
++ error_code, 32, SIGSEGV) != NOTIFY_STOP) {
++ siginfo_t info;
++ info.si_signo = SIGSEGV;
++ info.si_errno = 0;
++ info.si_code = ILL_BADSTK;
++ info.si_addr = 0;
++ do_trap(32, SIGSEGV, "iret exception", regs, error_code, &info);
++ }
+ }
+ #endif
+
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 73ffd55..0cf2a7b 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -149,6 +149,12 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ set_nx();
+ if (nx_enabled)
+ printk(KERN_INFO "NX (Execute Disable) protection: active\n");
++#ifdef CONFIG_X86_32
++ else
++ if (exec_shield)
++ printk(KERN_INFO "Using x86 segment limits to approximate "
++ "NX protection\n");
++#endif
+
+ /* Enable PSE if available */
+ if (cpu_has_pse)
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index c8191de..7d84d01 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -124,13 +124,16 @@ static unsigned long mmap_legacy_base(void)
+ */
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+- if (mmap_is_legacy()) {
++ if (!(2 & exec_shield) && mmap_is_legacy()) {
+ mm->mmap_base = mmap_legacy_base();
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
++ if (!(current->personality & READ_IMPLIES_EXEC)
++ && mmap_is_ia32())
++ mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+ }
+diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
+index 513d8ed..c614a90 100644
+--- a/arch/x86/mm/setup_nx.c
++++ b/arch/x86/mm/setup_nx.c
+@@ -1,3 +1,4 @@
++#include <linux/sched.h>
+ #include <linux/spinlock.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+@@ -27,6 +28,9 @@ static int __init noexec_setup(char *str)
+ } else if (!strncmp(str, "off", 3)) {
+ disable_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
++#ifdef CONFIG_X86_32
++ exec_shield = 0;
++#endif
+ }
+ return 0;
+ }
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 36fe08e..3806a45 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -6,6 +6,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+
++#include <asm/desc.h>
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
+ #include <asm/apic.h>
+@@ -130,6 +131,12 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
+ union smp_flush_state *f;
+
+ cpu = smp_processor_id();
++
++#ifdef CONFIG_X86_32
++ if (current->active_mm)
++ load_user_cs_desc(cpu, current->active_mm);
++#endif
++
+ /*
+ * orig_rax contains the negated interrupt vector.
+ * Use that to determine where the sender put the data.
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+index 58bc00f..1fdafb5 100644
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ if (compat)
+ addr = VDSO_HIGH_BASE;
+ else {
+- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 3439616..31e5c6f 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -323,6 +323,24 @@ static void xen_set_ldt(const void *addr, unsigned entries)
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+ }
+
++#ifdef CONFIG_X86_32
++static void xen_load_user_cs_desc(int cpu, struct mm_struct *mm)
++{
++ void *gdt;
++ xmaddr_t mgdt;
++ u64 descriptor;
++ struct desc_struct user_cs;
++
++ gdt = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];
++ mgdt = virt_to_machine(gdt);
++
++ user_cs = mm->context.user_cs;
++ descriptor = (u64) user_cs.a | ((u64) user_cs.b) << 32;
++
++ HYPERVISOR_update_descriptor(mgdt.maddr, descriptor);
++}
++#endif /*CONFIG_X86_32*/
++
+ static void xen_load_gdt(const struct desc_ptr *dtr)
+ {
+ unsigned long va = dtr->address;
+@@ -949,6 +967,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+
+ .load_tr_desc = paravirt_nop,
+ .set_ldt = xen_set_ldt,
++#ifdef CONFIG_X86_32
++ .load_user_cs_desc = xen_load_user_cs_desc,
++#endif /*CONFIG_X86_32*/
+ .load_gdt = xen_load_gdt,
+ .load_idt = xen_load_idt,
+ .load_tls = xen_load_tls,
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index b9b3bb5..1e55926 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -73,7 +73,7 @@ static struct linux_binfmt elf_format = {
+ .hasvdso = 1
+ };
+
+-#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
++#define BAD_ADDR(x) IS_ERR_VALUE(x)
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
+@@ -721,6 +721,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ break;
+ }
+
++ if (current->personality == PER_LINUX && (exec_shield & 2)) {
++ executable_stack = EXSTACK_DISABLE_X;
++ current->flags |= PF_RANDOMIZE;
++ }
++
+ /* Some simple consistency checks for the interpreter */
+ if (elf_interpreter) {
+ retval = -ELIBBAD;
+@@ -740,6 +745,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (retval)
+ goto out_free_dentry;
+
++#ifdef CONFIG_X86_32
++ /*
++ * Turn off the CS limit completely if exec-shield disabled or
++ * NX active:
++ */
++ if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
++ arch_add_exec_range(current->mm, -1);
++#endif
++
+ /* OK, This is the point of no return */
+ current->flags &= ~PF_FORKNOEXEC;
+ current->mm->def_flags = def_flags;
+@@ -747,7 +761,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
+- if (elf_read_implies_exec(loc->elf_ex, executable_stack))
++ if (!(exec_shield & 2) &&
++ elf_read_implies_exec(loc->elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+@@ -912,7 +927,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ interpreter,
+ &interp_map_addr,
+ load_bias);
+- if (!IS_ERR((void *)elf_entry)) {
++ if (!BAD_ADDR(elf_entry)) {
+ /*
+ * load_elf_interp() returns relocation
+ * adjustment
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 24c3956..88f944d 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1129,7 +1129,13 @@ extern int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long flags, struct page **pages);
+
+-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
++extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
++
++static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
++}
+
+ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 84a524a..a81e0db 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -206,6 +206,9 @@ struct mm_struct {
+ unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags);
++ unsigned long (*get_unmapped_exec_area) (struct file *filp,
++ unsigned long addr, unsigned long len,
++ unsigned long pgoff, unsigned long flags);
+ void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long task_size; /* size of task vm space */
+diff --git a/include/linux/resource.h b/include/linux/resource.h
+index 40fc7e6..68c2549 100644
+--- a/include/linux/resource.h
++++ b/include/linux/resource.h
+@@ -55,8 +55,11 @@ struct rlimit {
+ /*
+ * Limit the stack by to some sane default: root can always
+ * increase this limit if needed.. 8MB seems reasonable.
++ *
++ * (2MB more to cover randomization effects.)
+ */
+-#define _STK_LIM (8*1024*1024)
++#define _STK_LIM (10*1024*1024)
++#define EXEC_STACK_BIAS (2*1024*1024)
+
+ /*
+ * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 75e6e60..0bce489 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -102,6 +102,9 @@ struct fs_struct;
+ struct bts_context;
+ struct perf_event_context;
+
++extern int exec_shield;
++extern int print_fatal_signals;
++
+ /*
+ * List of flags we want to share for kernel threads,
+ * if only because they are not used by them anyway.
+@@ -378,6 +381,10 @@ extern int sysctl_max_map_count;
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
++
++extern unsigned long
++arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
++ unsigned long, unsigned long);
+ extern unsigned long
+ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 0d949c5..12ca319 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -88,6 +88,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
+ #ifndef CONFIG_MMU
+ extern int sysctl_nr_trim_pages;
+ #endif
++
++int exec_shield = (1<<0);
++/* exec_shield is a bitmask:
++ * 0: off; vdso at STACK_TOP, 1 page below TASK_SIZE
++ * (1<<0) 1: on [also on if !=0]
++ * (1<<1) 2: force noexecstack regardless of PT_GNU_STACK
++ * The old settings
++ * (1<<2) 4: vdso just below .text of main (unless too low)
++ * (1<<3) 8: vdso just below .text of PT_INTERP (unless too low)
++ * are ignored because the vdso is placed completely randomly
++ */
++
++static int __init setup_exec_shield(char *str)
++{
++ get_option(&str, &exec_shield);
++
++ return 1;
++}
++__setup("exec-shield=", setup_exec_shield);
++
+ #ifdef CONFIG_RCU_TORTURE_TEST
+ extern int rcutorture_runnable;
+ #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
+@@ -408,6 +428,14 @@ static struct ctl_table kern_table[] = {
+ .proc_handler = &proc_dointvec,
+ },
+ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "exec-shield",
++ .data = &exec_shield,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec,
++ },
++ {
+ .ctl_name = KERN_CORE_USES_PID,
+ .procname = "core_uses_pid",
+ .data = &core_uses_pid,
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 73f5e4b..814b95f 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -29,6 +29,7 @@
+ #include <linux/rmap.h>
+ #include <linux/mmu_notifier.h>
+ #include <linux/perf_event.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -45,6 +46,18 @@
+ #define arch_rebalance_pgtables(addr, len) (addr)
+ #endif
+
++/* No sane architecture will #define these to anything else */
++#ifndef arch_add_exec_range
++#define arch_add_exec_range(mm, limit) do { ; } while (0)
++#endif
++#ifndef arch_flush_exec_range
++#define arch_flush_exec_range(mm) do { ; } while (0)
++#endif
++#ifndef arch_remove_exec_range
++#define arch_remove_exec_range(mm, limit) do { ; } while (0)
++#endif
++
++
+ static void unmap_region(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct vm_area_struct *prev,
+ unsigned long start, unsigned long end);
+@@ -389,6 +402,8 @@ static inline void
+ __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct rb_node *rb_parent)
+ {
++ if (vma->vm_flags & VM_EXEC)
++ arch_add_exec_range(mm, vma->vm_end);
+ if (prev) {
+ vma->vm_next = prev->vm_next;
+ prev->vm_next = vma;
+@@ -491,6 +506,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ if (mm->mmap_cache == vma)
+ mm->mmap_cache = prev;
++ if (vma->vm_flags & VM_EXEC)
++ arch_remove_exec_range(mm, vma->vm_end);
+ }
+
+ /*
+@@ -798,6 +815,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ } else /* cases 2, 5, 7 */
+ vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL);
++ if (prev->vm_flags & VM_EXEC)
++ arch_add_exec_range(mm, prev->vm_end);
+ return prev;
+ }
+
+@@ -970,7 +989,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+- addr = get_unmapped_area(file, addr, len, pgoff, flags);
++ addr = get_unmapped_area_prot(file, addr, len, pgoff, flags,
++ prot & PROT_EXEC);
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+@@ -1453,21 +1473,25 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+ }
+
+ unsigned long
+-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+- unsigned long pgoff, unsigned long flags)
++get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
++ unsigned long pgoff, unsigned long flags, int exec)
+ {
+ unsigned long (*get_area)(struct file *, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+
+ unsigned long error = arch_mmap_check(addr, len, flags);
+ if (error)
+ return error;
+
+ /* Careful about overflows.. */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+- get_area = current->mm->get_unmapped_area;
++ if (exec && current->mm->get_unmapped_exec_area)
++ get_area = current->mm->get_unmapped_exec_area;
++ else
++ get_area = current->mm->get_unmapped_area;
++
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ get_area = file->f_op->get_unmapped_area;
+ addr = get_area(file, addr, len, pgoff, flags);
+@@ -1473,8 +1497,76 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+
+ return arch_rebalance_pgtables(addr, len);
+ }
++EXPORT_SYMBOL(get_unmapped_area_prot);
++
++#define SHLIB_BASE 0x00110000
++
++unsigned long
++arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
++ unsigned long len0, unsigned long pgoff, unsigned long flags)
++{
++ unsigned long addr = addr0, len = len0;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long tmp;
++
++ if (len > TASK_SIZE)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++ if (!addr)
++ addr = randomize_range(SHLIB_BASE, 0x01000000, len);
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ vma = find_vma(mm, addr);
++ if (TASK_SIZE - len >= addr &&
++ (!vma || addr + len <= vma->vm_start))
++ return addr;
++ }
++
++ addr = SHLIB_BASE;
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (TASK_SIZE - len < addr)
++ return -ENOMEM;
++
++ if (!vma || addr + len <= vma->vm_start) {
++ /*
++ * Must not let a PROT_EXEC mapping get into the
++ * brk area:
++ */
++ if (addr + len > mm->brk)
++ goto failed;
++
++ /*
++ * Up until the brk area we randomize addresses
++ * as much as possible:
++ */
++ if (addr >= 0x01000000) {
++ tmp = randomize_range(0x01000000,
++ PAGE_ALIGN(max(mm->start_brk,
++ (unsigned long)0x08000000)), len);
++ vma = find_vma(mm, tmp);
++ if (TASK_SIZE - len >= tmp &&
++ (!vma || tmp + len <= vma->vm_start))
++ return tmp;
++ }
++ /*
++ * Ok, randomization didn't work out - return
++ * the result of the linear search:
++ */
++ return addr;
++ }
++ addr = vma->vm_end;
++ }
++
++failed:
++ return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
++}
+
+-EXPORT_SYMBOL(get_unmapped_area);
+
+ /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1549,6 +1641,14 @@ out:
+ return prev ? prev->vm_next : vma;
+ }
+
++static int over_stack_limit(unsigned long sz)
++{
++ if (sz < EXEC_STACK_BIAS)
++ return 0;
++ return (sz - EXEC_STACK_BIAS) >
++ current->signal->rlim[RLIMIT_STACK].rlim_cur;
++}
++
+ /*
+ * Verify that the stack growth is acceptable and
+ * update accounting. This is shared with both the
+@@ -1565,7 +1665,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+ return -ENOMEM;
+
+ /* Stack limit test */
+- if (size > rlim[RLIMIT_STACK].rlim_cur)
++ if (over_stack_limit(size))
+ return -ENOMEM;
+
+ /* mlock limit tests */
+@@ -1875,10 +1975,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ if (new->vm_ops && new->vm_ops->open)
+ new->vm_ops->open(new);
+
+- if (new_below)
++ if (new_below) {
++ unsigned long old_end = vma->vm_end;
++
+ vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
+ ((addr - new->vm_start) >> PAGE_SHIFT), new);
+- else
++ if (vma->vm_flags & VM_EXEC)
++ arch_remove_exec_range(mm, old_end);
++ } else
+ vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+
+ return 0;
+@@ -2128,6 +2232,7 @@ void exit_mmap(struct mm_struct *mm)
+
+ free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
+ tlb_finish_mmu(tlb, 0, end);
++ arch_flush_exec_range(mm);
+
+ /*
+ * Walk the list again, actually closing and freeing it,
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 8bc969d..3c9b4fc 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -26,9 +26,14 @@
+ #include <linux/perf_event.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
++#include <asm/pgalloc.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+
++#ifndef arch_remove_exec_range
++#define arch_remove_exec_range(mm, limit) do { ; } while (0)
++#endif
++
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+ {
+@@ -139,7 +144,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long oldflags = vma->vm_flags;
+ long nrpages = (end - start) >> PAGE_SHIFT;
+- unsigned long charged = 0;
++ unsigned long charged = 0, old_end = vma->vm_end;
+ pgoff_t pgoff;
+ int error;
+ int dirty_accountable = 0;
+@@ -204,6 +209,9 @@ success:
+ dirty_accountable = 1;
+ }
+
++ if (oldflags & VM_EXEC)
++ arch_remove_exec_range(current->mm, old_end);
++
+ mmu_notifier_invalidate_range_start(mm, start, end);
+ if (is_vm_hugetlb_page(vma))
+ hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 97bff25..17a9fd7 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -414,10 +414,10 @@ unsigned long do_mremap(unsigned long addr,
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+- new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
++ new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
+ vma->vm_pgoff +
+ ((addr - vma->vm_start) >> PAGE_SHIFT),
+- map_flags);
++ map_flags, vma->vm_flags & VM_EXEC);
+ if (new_addr & ~PAGE_MASK) {
+ ret = new_addr;
+ goto out;
diff --git a/freed-ora/current/F-12/linux-2.6-firewire-git-pending.patch b/freed-ora/current/F-12/linux-2.6-firewire-git-pending.patch
new file mode 100644
index 000000000..e05471f1b
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-firewire-git-pending.patch
@@ -0,0 +1,4 @@
+#
+# Patches under review and/or pending inclusion in the linux1394-git
+# tree (and/or already in by the time you read this), which we want...
+#
diff --git a/freed-ora/current/F-12/linux-2.6-firewire-git-update.patch b/freed-ora/current/F-12/linux-2.6-firewire-git-update.patch
new file mode 100644
index 000000000..685808133
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-firewire-git-update.patch
@@ -0,0 +1,3682 @@
+linux1394-2.6.git tree vs. linus v2.6.29-rc3-git1 on 20090130 by jarod
+
+---
+ firewire-git/drivers/firewire/fw-card.c | 68 -
+ firewire-git/drivers/firewire/fw-cdev.c | 1014 +++++++++++++++++--------
+ firewire-git/drivers/firewire/fw-device.c | 43 -
+ firewire-git/drivers/firewire/fw-device.h | 7
+ firewire-git/drivers/firewire/fw-iso.c | 225 ++++-
+ firewire-git/drivers/firewire/fw-ohci.c | 236 ++---
+ firewire-git/drivers/firewire/fw-sbp2.c | 57 -
+ firewire-git/drivers/firewire/fw-topology.c | 28
+ firewire-git/drivers/firewire/fw-topology.h | 19
+ firewire-git/drivers/firewire/fw-transaction.c | 151 +--
+ firewire-git/drivers/firewire/fw-transaction.h | 125 ---
+ include/linux/firewire-cdev.h | 170 +++-
+ 12 files changed, 1359 insertions(+), 784 deletions(-)
+
+diff -Naurp linux-2.6-git/drivers/firewire/fw-card.c firewire-git/drivers/firewire/fw-card.c
+--- linux-2.6-git/drivers/firewire/fw-card.c 2009-01-30 13:39:02.989651512 -0500
++++ firewire-git/drivers/firewire/fw-card.c 2009-01-30 13:35:51.859771884 -0500
+@@ -63,8 +63,7 @@ static int descriptor_count;
+ #define BIB_CMC ((1) << 30)
+ #define BIB_IMC ((1) << 31)
+
+-static u32 *
+-generate_config_rom(struct fw_card *card, size_t *config_rom_length)
++static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+ {
+ struct fw_descriptor *desc;
+ static u32 config_rom[256];
+@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card
+ return config_rom;
+ }
+
+-static void
+-update_config_roms(void)
++static void update_config_roms(void)
+ {
+ struct fw_card *card;
+ u32 *config_rom;
+@@ -141,8 +139,7 @@ update_config_roms(void)
+ }
+ }
+
+-int
+-fw_core_add_descriptor(struct fw_descriptor *desc)
++int fw_core_add_descriptor(struct fw_descriptor *desc)
+ {
+ size_t i;
+
+@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descrip
+ return 0;
+ }
+
+-void
+-fw_core_remove_descriptor(struct fw_descriptor *desc)
++void fw_core_remove_descriptor(struct fw_descriptor *desc)
+ {
+ mutex_lock(&card_mutex);
+
+@@ -189,8 +185,7 @@ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+
+-void
+-fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
++void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+ {
+ int scheduled;
+
+@@ -200,8 +195,7 @@ fw_schedule_bm_work(struct fw_card *card
+ fw_card_put(card);
+ }
+
+-static void
+-fw_card_bm_work(struct work_struct *work)
++static void fw_card_bm_work(struct work_struct *work)
+ {
+ struct fw_card *card = container_of(work, struct fw_card, work.work);
+ struct fw_device *root_device;
+@@ -371,17 +365,16 @@ fw_card_bm_work(struct work_struct *work
+ fw_card_put(card);
+ }
+
+-static void
+-flush_timer_callback(unsigned long data)
++static void flush_timer_callback(unsigned long data)
+ {
+ struct fw_card *card = (struct fw_card *)data;
+
+ fw_flush_transactions(card);
+ }
+
+-void
+-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+- struct device *device)
++void fw_card_initialize(struct fw_card *card,
++ const struct fw_card_driver *driver,
++ struct device *device)
+ {
+ static atomic_t index = ATOMIC_INIT(-1);
+
+@@ -406,9 +399,8 @@ fw_card_initialize(struct fw_card *card,
+ }
+ EXPORT_SYMBOL(fw_card_initialize);
+
+-int
+-fw_card_add(struct fw_card *card,
+- u32 max_receive, u32 link_speed, u64 guid)
++int fw_card_add(struct fw_card *card,
++ u32 max_receive, u32 link_speed, u64 guid)
+ {
+ u32 *config_rom;
+ size_t length;
+@@ -435,23 +427,20 @@ EXPORT_SYMBOL(fw_card_add);
+ * dummy driver just fails all IO.
+ */
+
+-static int
+-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
++static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+ {
+ BUG();
+ return -1;
+ }
+
+-static int
+-dummy_update_phy_reg(struct fw_card *card, int address,
+- int clear_bits, int set_bits)
++static int dummy_update_phy_reg(struct fw_card *card, int address,
++ int clear_bits, int set_bits)
+ {
+ return -ENODEV;
+ }
+
+-static int
+-dummy_set_config_rom(struct fw_card *card,
+- u32 *config_rom, size_t length)
++static int dummy_set_config_rom(struct fw_card *card,
++ u32 *config_rom, size_t length)
+ {
+ /*
+ * We take the card out of card_list before setting the dummy
+@@ -461,27 +450,23 @@ dummy_set_config_rom(struct fw_card *car
+ return -1;
+ }
+
+-static void
+-dummy_send_request(struct fw_card *card, struct fw_packet *packet)
++static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+ {
+ packet->callback(packet, card, -ENODEV);
+ }
+
+-static void
+-dummy_send_response(struct fw_card *card, struct fw_packet *packet)
++static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+ {
+ packet->callback(packet, card, -ENODEV);
+ }
+
+-static int
+-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
++static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+ {
+ return -ENOENT;
+ }
+
+-static int
+-dummy_enable_phys_dma(struct fw_card *card,
+- int node_id, int generation)
++static int dummy_enable_phys_dma(struct fw_card *card,
++ int node_id, int generation)
+ {
+ return -ENODEV;
+ }
+@@ -496,16 +481,14 @@ static struct fw_card_driver dummy_drive
+ .enable_phys_dma = dummy_enable_phys_dma,
+ };
+
+-void
+-fw_card_release(struct kref *kref)
++void fw_card_release(struct kref *kref)
+ {
+ struct fw_card *card = container_of(kref, struct fw_card, kref);
+
+ complete(&card->done);
+ }
+
+-void
+-fw_core_remove_card(struct fw_card *card)
++void fw_core_remove_card(struct fw_card *card)
+ {
+ card->driver->update_phy_reg(card, 4,
+ PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+@@ -529,8 +512,7 @@ fw_core_remove_card(struct fw_card *card
+ }
+ EXPORT_SYMBOL(fw_core_remove_card);
+
+-int
+-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
++int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+ {
+ int reg = short_reset ? 5 : 1;
+ int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+diff -Naurp linux-2.6-git/drivers/firewire/fw-cdev.c firewire-git/drivers/firewire/fw-cdev.c
+--- linux-2.6-git/drivers/firewire/fw-cdev.c 2008-11-04 11:19:19.000000000 -0500
++++ firewire-git/drivers/firewire/fw-cdev.c 2009-01-30 13:35:51.860646788 -0500
+@@ -18,87 +18,162 @@
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/wait.h>
+-#include <linux/errno.h>
++#include <linux/compat.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+-#include <linux/vmalloc.h>
++#include <linux/errno.h>
++#include <linux/firewire-cdev.h>
++#include <linux/idr.h>
++#include <linux/jiffies.h>
++#include <linux/kernel.h>
++#include <linux/kref.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/poll.h>
+ #include <linux/preempt.h>
++#include <linux/spinlock.h>
+ #include <linux/time.h>
+-#include <linux/delay.h>
+-#include <linux/mm.h>
+-#include <linux/idr.h>
+-#include <linux/compat.h>
+-#include <linux/firewire-cdev.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+-#include "fw-transaction.h"
+-#include "fw-topology.h"
++
+ #include "fw-device.h"
++#include "fw-topology.h"
++#include "fw-transaction.h"
++
++struct client {
++ u32 version;
++ struct fw_device *device;
++
++ spinlock_t lock;
++ bool in_shutdown;
++ struct idr resource_idr;
++ struct list_head event_list;
++ wait_queue_head_t wait;
++ u64 bus_reset_closure;
++
++ struct fw_iso_context *iso_context;
++ u64 iso_closure;
++ struct fw_iso_buffer buffer;
++ unsigned long vm_start;
+
+-struct client;
+-struct client_resource {
+ struct list_head link;
+- void (*release)(struct client *client, struct client_resource *r);
+- u32 handle;
++ struct kref kref;
++};
++
++static inline void client_get(struct client *client)
++{
++ kref_get(&client->kref);
++}
++
++static void client_release(struct kref *kref)
++{
++ struct client *client = container_of(kref, struct client, kref);
++
++ fw_device_put(client->device);
++ kfree(client);
++}
++
++static void client_put(struct client *client)
++{
++ kref_put(&client->kref, client_release);
++}
++
++struct client_resource;
++typedef void (*client_resource_release_fn_t)(struct client *,
++ struct client_resource *);
++struct client_resource {
++ client_resource_release_fn_t release;
++ int handle;
++};
++
++struct address_handler_resource {
++ struct client_resource resource;
++ struct fw_address_handler handler;
++ __u64 closure;
++ struct client *client;
++};
++
++struct outbound_transaction_resource {
++ struct client_resource resource;
++ struct fw_transaction transaction;
++};
++
++struct inbound_transaction_resource {
++ struct client_resource resource;
++ struct fw_request *request;
++ void *data;
++ size_t length;
+ };
+
++struct descriptor_resource {
++ struct client_resource resource;
++ struct fw_descriptor descriptor;
++ u32 data[0];
++};
++
++struct iso_resource {
++ struct client_resource resource;
++ struct client *client;
++ /* Schedule work and access todo only with client->lock held. */
++ struct delayed_work work;
++ enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
++ ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
++ int generation;
++ u64 channels;
++ s32 bandwidth;
++ struct iso_resource_event *e_alloc, *e_dealloc;
++};
++
++static void schedule_iso_resource(struct iso_resource *);
++static void release_iso_resource(struct client *, struct client_resource *);
++
+ /*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+- * the first field in the struct.
++ * the first field in a struct XYZ_event.
+ */
+-
+ struct event {
+ struct { void *data; size_t size; } v[2];
+ struct list_head link;
+ };
+
+-struct bus_reset {
++struct bus_reset_event {
+ struct event event;
+ struct fw_cdev_event_bus_reset reset;
+ };
+
+-struct response {
++struct outbound_transaction_event {
+ struct event event;
+- struct fw_transaction transaction;
+ struct client *client;
+- struct client_resource resource;
++ struct outbound_transaction_resource r;
+ struct fw_cdev_event_response response;
+ };
+
+-struct iso_interrupt {
++struct inbound_transaction_event {
+ struct event event;
+- struct fw_cdev_event_iso_interrupt interrupt;
++ struct fw_cdev_event_request request;
+ };
+
+-struct client {
+- u32 version;
+- struct fw_device *device;
+- spinlock_t lock;
+- u32 resource_handle;
+- struct list_head resource_list;
+- struct list_head event_list;
+- wait_queue_head_t wait;
+- u64 bus_reset_closure;
+-
+- struct fw_iso_context *iso_context;
+- u64 iso_closure;
+- struct fw_iso_buffer buffer;
+- unsigned long vm_start;
++struct iso_interrupt_event {
++ struct event event;
++ struct fw_cdev_event_iso_interrupt interrupt;
++};
+
+- struct list_head link;
++struct iso_resource_event {
++ struct event event;
++ struct fw_cdev_event_iso_resource resource;
+ };
+
+-static inline void __user *
+-u64_to_uptr(__u64 value)
++static inline void __user *u64_to_uptr(__u64 value)
+ {
+ return (void __user *)(unsigned long)value;
+ }
+
+-static inline __u64
+-uptr_to_u64(void __user *ptr)
++static inline __u64 uptr_to_u64(void __user *ptr)
+ {
+ return (__u64)(unsigned long)ptr;
+ }
+@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inod
+ {
+ struct fw_device *device;
+ struct client *client;
+- unsigned long flags;
+
+ device = fw_device_get_by_devt(inode->i_rdev);
+ if (device == NULL)
+@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inod
+ }
+
+ client->device = device;
+- INIT_LIST_HEAD(&client->event_list);
+- INIT_LIST_HEAD(&client->resource_list);
+ spin_lock_init(&client->lock);
++ idr_init(&client->resource_idr);
++ INIT_LIST_HEAD(&client->event_list);
+ init_waitqueue_head(&client->wait);
++ kref_init(&client->kref);
+
+ file->private_data = client;
+
+- spin_lock_irqsave(&device->card->lock, flags);
++ mutex_lock(&device->client_list_mutex);
+ list_add_tail(&client->link, &device->client_list);
+- spin_unlock_irqrestore(&device->card->lock, flags);
++ mutex_unlock(&device->client_list_mutex);
+
+ return 0;
+ }
+@@ -150,68 +225,69 @@ static void queue_event(struct client *c
+ event->v[1].size = size1;
+
+ spin_lock_irqsave(&client->lock, flags);
+- list_add_tail(&event->link, &client->event_list);
++ if (client->in_shutdown)
++ kfree(event);
++ else
++ list_add_tail(&event->link, &client->event_list);
+ spin_unlock_irqrestore(&client->lock, flags);
+
+ wake_up_interruptible(&client->wait);
+ }
+
+-static int
+-dequeue_event(struct client *client, char __user *buffer, size_t count)
++static int dequeue_event(struct client *client,
++ char __user *buffer, size_t count)
+ {
+- unsigned long flags;
+ struct event *event;
+ size_t size, total;
+- int i, retval;
++ int i, ret;
+
+- retval = wait_event_interruptible(client->wait,
+- !list_empty(&client->event_list) ||
+- fw_device_is_shutdown(client->device));
+- if (retval < 0)
+- return retval;
++ ret = wait_event_interruptible(client->wait,
++ !list_empty(&client->event_list) ||
++ fw_device_is_shutdown(client->device));
++ if (ret < 0)
++ return ret;
+
+ if (list_empty(&client->event_list) &&
+ fw_device_is_shutdown(client->device))
+ return -ENODEV;
+
+- spin_lock_irqsave(&client->lock, flags);
+- event = container_of(client->event_list.next, struct event, link);
++ spin_lock_irq(&client->lock);
++ event = list_first_entry(&client->event_list, struct event, link);
+ list_del(&event->link);
+- spin_unlock_irqrestore(&client->lock, flags);
++ spin_unlock_irq(&client->lock);
+
+ total = 0;
+ for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
+ size = min(event->v[i].size, count - total);
+ if (copy_to_user(buffer + total, event->v[i].data, size)) {
+- retval = -EFAULT;
++ ret = -EFAULT;
+ goto out;
+ }
+ total += size;
+ }
+- retval = total;
++ ret = total;
+
+ out:
+ kfree(event);
+
+- return retval;
++ return ret;
+ }
+
+-static ssize_t
+-fw_device_op_read(struct file *file,
+- char __user *buffer, size_t count, loff_t *offset)
++static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
++ size_t count, loff_t *offset)
+ {
+ struct client *client = file->private_data;
+
+ return dequeue_event(client, buffer, count);
+ }
+
+-/* caller must hold card->lock so that node pointers can be dereferenced here */
+-static void
+-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+- struct client *client)
++static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
++ struct client *client)
+ {
+ struct fw_card *card = client->device->card;
+
++ spin_lock_irq(&card->lock);
++
+ event->closure = client->bus_reset_closure;
+ event->type = FW_CDEV_EVENT_BUS_RESET;
+ event->generation = client->device->generation;
+@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_even
+ event->bm_node_id = 0; /* FIXME: We don't track the BM. */
+ event->irm_node_id = card->irm_node->node_id;
+ event->root_node_id = card->root_node->node_id;
++
++ spin_unlock_irq(&card->lock);
+ }
+
+-static void
+-for_each_client(struct fw_device *device,
+- void (*callback)(struct client *client))
++static void for_each_client(struct fw_device *device,
++ void (*callback)(struct client *client))
+ {
+- struct fw_card *card = device->card;
+ struct client *c;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&card->lock, flags);
+
++ mutex_lock(&device->client_list_mutex);
+ list_for_each_entry(c, &device->client_list, link)
+ callback(c);
++ mutex_unlock(&device->client_list_mutex);
++}
++
++static int schedule_reallocations(int id, void *p, void *data)
++{
++ struct client_resource *r = p;
+
+- spin_unlock_irqrestore(&card->lock, flags);
++ if (r->release == release_iso_resource)
++ schedule_iso_resource(container_of(r,
++ struct iso_resource, resource));
++ return 0;
+ }
+
+-static void
+-queue_bus_reset_event(struct client *client)
++static void queue_bus_reset_event(struct client *client)
+ {
+- struct bus_reset *bus_reset;
++ struct bus_reset_event *e;
+
+- bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
+- if (bus_reset == NULL) {
++ e = kzalloc(sizeof(*e), GFP_KERNEL);
++ if (e == NULL) {
+ fw_notify("Out of memory when allocating bus reset event\n");
+ return;
+ }
+
+- fill_bus_reset_event(&bus_reset->reset, client);
++ fill_bus_reset_event(&e->reset, client);
++
++ queue_event(client, &e->event,
++ &e->reset, sizeof(e->reset), NULL, 0);
+
+- queue_event(client, &bus_reset->event,
+- &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
++ spin_lock_irq(&client->lock);
++ idr_for_each(&client->resource_idr, schedule_reallocations, client);
++ spin_unlock_irq(&client->lock);
+ }
+
+ void fw_device_cdev_update(struct fw_device *device)
+@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client
+ {
+ struct fw_cdev_get_info *get_info = buffer;
+ struct fw_cdev_event_bus_reset bus_reset;
+- struct fw_card *card = client->device->card;
+ unsigned long ret = 0;
+
+ client->version = get_info->version;
+ get_info->version = FW_CDEV_VERSION;
++ get_info->card = client->device->card->index;
+
+ down_read(&fw_device_rwsem);
+
+@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client
+ client->bus_reset_closure = get_info->bus_reset_closure;
+ if (get_info->bus_reset != 0) {
+ void __user *uptr = u64_to_uptr(get_info->bus_reset);
+- unsigned long flags;
+
+- spin_lock_irqsave(&card->lock, flags);
+ fill_bus_reset_event(&bus_reset, client);
+- spin_unlock_irqrestore(&card->lock, flags);
+-
+ if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+ return -EFAULT;
+ }
+
+- get_info->card = card->index;
+-
+ return 0;
+ }
+
+-static void
+-add_client_resource(struct client *client, struct client_resource *resource)
++static int add_client_resource(struct client *client,
++ struct client_resource *resource, gfp_t gfp_mask)
+ {
+ unsigned long flags;
++ int ret;
++
++ retry:
++ if (idr_pre_get(&client->resource_idr, gfp_mask | __GFP_ZERO) == 0)
++ return -ENOMEM;
+
+ spin_lock_irqsave(&client->lock, flags);
+- list_add_tail(&resource->link, &client->resource_list);
+- resource->handle = client->resource_handle++;
++ if (client->in_shutdown)
++ ret = -ECANCELED;
++ else
++ ret = idr_get_new(&client->resource_idr, resource,
++ &resource->handle);
++ if (ret >= 0) {
++ client_get(client);
++ if (resource->release == release_iso_resource)
++ schedule_iso_resource(container_of(resource,
++ struct iso_resource, resource));
++ }
+ spin_unlock_irqrestore(&client->lock, flags);
++
++ if (ret == -EAGAIN)
++ goto retry;
++
++ return ret < 0 ? ret : 0;
+ }
+
+-static int
+-release_client_resource(struct client *client, u32 handle,
+- struct client_resource **resource)
++static int release_client_resource(struct client *client, u32 handle,
++ client_resource_release_fn_t release,
++ struct client_resource **resource)
+ {
+ struct client_resource *r;
+- unsigned long flags;
+
+- spin_lock_irqsave(&client->lock, flags);
+- list_for_each_entry(r, &client->resource_list, link) {
+- if (r->handle == handle) {
+- list_del(&r->link);
+- break;
+- }
+- }
+- spin_unlock_irqrestore(&client->lock, flags);
++ spin_lock_irq(&client->lock);
++ if (client->in_shutdown)
++ r = NULL;
++ else
++ r = idr_find(&client->resource_idr, handle);
++ if (r && r->release == release)
++ idr_remove(&client->resource_idr, handle);
++ spin_unlock_irq(&client->lock);
+
+- if (&r->link == &client->resource_list)
++ if (!(r && r->release == release))
+ return -EINVAL;
+
+ if (resource)
+@@ -350,203 +448,242 @@ release_client_resource(struct client *c
+ else
+ r->release(client, r);
+
++ client_put(client);
++
+ return 0;
+ }
+
+-static void
+-release_transaction(struct client *client, struct client_resource *resource)
++static void release_transaction(struct client *client,
++ struct client_resource *resource)
+ {
+- struct response *response =
+- container_of(resource, struct response, resource);
++ struct outbound_transaction_resource *r = container_of(resource,
++ struct outbound_transaction_resource, resource);
+
+- fw_cancel_transaction(client->device->card, &response->transaction);
++ fw_cancel_transaction(client->device->card, &r->transaction);
+ }
+
+-static void
+-complete_transaction(struct fw_card *card, int rcode,
+- void *payload, size_t length, void *data)
++static void complete_transaction(struct fw_card *card, int rcode,
++ void *payload, size_t length, void *data)
+ {
+- struct response *response = data;
+- struct client *client = response->client;
++ struct outbound_transaction_event *e = data;
++ struct fw_cdev_event_response *rsp = &e->response;
++ struct client *client = e->client;
+ unsigned long flags;
+- struct fw_cdev_event_response *r = &response->response;
+
+- if (length < r->length)
+- r->length = length;
++ if (length < rsp->length)
++ rsp->length = length;
+ if (rcode == RCODE_COMPLETE)
+- memcpy(r->data, payload, r->length);
++ memcpy(rsp->data, payload, rsp->length);
+
+ spin_lock_irqsave(&client->lock, flags);
+- list_del(&response->resource.link);
++ /*
++ * 1. If called while in shutdown, the idr tree must be left untouched.
++ * The idr handle will be removed and the client reference will be
++ * dropped later.
++ * 2. If the call chain was release_client_resource ->
++ * release_transaction -> complete_transaction (instead of a normal
++ * conclusion of the transaction), i.e. if this resource was already
++ * unregistered from the idr, the client reference will be dropped
++ * by release_client_resource and we must not drop it here.
++ */
++ if (!client->in_shutdown &&
++ idr_find(&client->resource_idr, e->r.resource.handle)) {
++ idr_remove(&client->resource_idr, e->r.resource.handle);
++ /* Drop the idr's reference */
++ client_put(client);
++ }
+ spin_unlock_irqrestore(&client->lock, flags);
+
+- r->type = FW_CDEV_EVENT_RESPONSE;
+- r->rcode = rcode;
++ rsp->type = FW_CDEV_EVENT_RESPONSE;
++ rsp->rcode = rcode;
+
+ /*
+- * In the case that sizeof(*r) doesn't align with the position of the
++ * In the case that sizeof(*rsp) doesn't align with the position of the
+ * data, and the read is short, preserve an extra copy of the data
+ * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
+ * for short reads and some apps depended on it, this is both safe
+ * and prudent for compatibility.
+ */
+- if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
+- queue_event(client, &response->event, r, sizeof(*r),
+- r->data, r->length);
++ if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
++ queue_event(client, &e->event, rsp, sizeof(*rsp),
++ rsp->data, rsp->length);
+ else
+- queue_event(client, &response->event, r, sizeof(*r) + r->length,
++ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
+ NULL, 0);
++
++ /* Drop the transaction callback's reference */
++ client_put(client);
+ }
+
+-static int ioctl_send_request(struct client *client, void *buffer)
++static int init_request(struct client *client,
++ struct fw_cdev_send_request *request,
++ int destination_id, int speed)
+ {
+- struct fw_device *device = client->device;
+- struct fw_cdev_send_request *request = buffer;
+- struct response *response;
++ struct outbound_transaction_event *e;
++ int ret;
+
+- /* What is the biggest size we'll accept, really? */
+- if (request->length > 4096)
+- return -EINVAL;
++ if (request->length > 4096 || request->length > 512 << speed)
++ return -EIO;
+
+- response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
+- if (response == NULL)
++ e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
++ if (e == NULL)
+ return -ENOMEM;
+
+- response->client = client;
+- response->response.length = request->length;
+- response->response.closure = request->closure;
++ e->client = client;
++ e->response.length = request->length;
++ e->response.closure = request->closure;
+
+ if (request->data &&
+- copy_from_user(response->response.data,
++ copy_from_user(e->response.data,
+ u64_to_uptr(request->data), request->length)) {
+- kfree(response);
+- return -EFAULT;
++ ret = -EFAULT;
++ goto failed;
+ }
+
+- response->resource.release = release_transaction;
+- add_client_resource(client, &response->resource);
+-
+- fw_send_request(device->card, &response->transaction,
+- request->tcode & 0x1f,
+- device->node->node_id,
+- request->generation,
+- device->max_speed,
+- request->offset,
+- response->response.data, request->length,
+- complete_transaction, response);
++ e->r.resource.release = release_transaction;
++ ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
++ if (ret < 0)
++ goto failed;
++
++ /* Get a reference for the transaction callback */
++ client_get(client);
++
++ fw_send_request(client->device->card, &e->r.transaction,
++ request->tcode & 0x1f, destination_id,
++ request->generation, speed, request->offset,
++ e->response.data, request->length,
++ complete_transaction, e);
+
+ if (request->data)
+ return sizeof(request) + request->length;
+ else
+ return sizeof(request);
++ failed:
++ kfree(e);
++
++ return ret;
+ }
+
+-struct address_handler {
+- struct fw_address_handler handler;
+- __u64 closure;
+- struct client *client;
+- struct client_resource resource;
+-};
++static int ioctl_send_request(struct client *client, void *buffer)
++{
++ struct fw_cdev_send_request *request = buffer;
+
+-struct request {
+- struct fw_request *request;
+- void *data;
+- size_t length;
+- struct client_resource resource;
+-};
++ switch (request->tcode) {
++ case TCODE_WRITE_QUADLET_REQUEST:
++ case TCODE_WRITE_BLOCK_REQUEST:
++ case TCODE_READ_QUADLET_REQUEST:
++ case TCODE_READ_BLOCK_REQUEST:
++ case TCODE_LOCK_MASK_SWAP:
++ case TCODE_LOCK_COMPARE_SWAP:
++ case TCODE_LOCK_FETCH_ADD:
++ case TCODE_LOCK_LITTLE_ADD:
++ case TCODE_LOCK_BOUNDED_ADD:
++ case TCODE_LOCK_WRAP_ADD:
++ case TCODE_LOCK_VENDOR_DEPENDENT:
++ break;
++ default:
++ return -EINVAL;
++ }
+
+-struct request_event {
+- struct event event;
+- struct fw_cdev_event_request request;
+-};
++ return init_request(client, request, client->device->node->node_id,
++ client->device->max_speed);
++}
+
+-static void
+-release_request(struct client *client, struct client_resource *resource)
++static void release_request(struct client *client,
++ struct client_resource *resource)
+ {
+- struct request *request =
+- container_of(resource, struct request, resource);
++ struct inbound_transaction_resource *r = container_of(resource,
++ struct inbound_transaction_resource, resource);
+
+- fw_send_response(client->device->card, request->request,
++ fw_send_response(client->device->card, r->request,
+ RCODE_CONFLICT_ERROR);
+- kfree(request);
++ kfree(r);
+ }
+
+-static void
+-handle_request(struct fw_card *card, struct fw_request *r,
+- int tcode, int destination, int source,
+- int generation, int speed,
+- unsigned long long offset,
+- void *payload, size_t length, void *callback_data)
+-{
+- struct address_handler *handler = callback_data;
+- struct request *request;
+- struct request_event *e;
+- struct client *client = handler->client;
++static void handle_request(struct fw_card *card, struct fw_request *request,
++ int tcode, int destination, int source,
++ int generation, int speed,
++ unsigned long long offset,
++ void *payload, size_t length, void *callback_data)
++{
++ struct address_handler_resource *handler = callback_data;
++ struct inbound_transaction_resource *r;
++ struct inbound_transaction_event *e;
++ int ret;
+
+- request = kmalloc(sizeof(*request), GFP_ATOMIC);
++ r = kmalloc(sizeof(*r), GFP_ATOMIC);
+ e = kmalloc(sizeof(*e), GFP_ATOMIC);
+- if (request == NULL || e == NULL) {
+- kfree(request);
+- kfree(e);
+- fw_send_response(card, r, RCODE_CONFLICT_ERROR);
+- return;
+- }
+-
+- request->request = r;
+- request->data = payload;
+- request->length = length;
++ if (r == NULL || e == NULL)
++ goto failed;
+
+- request->resource.release = release_request;
+- add_client_resource(client, &request->resource);
++ r->request = request;
++ r->data = payload;
++ r->length = length;
++
++ r->resource.release = release_request;
++ ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
++ if (ret < 0)
++ goto failed;
+
+ e->request.type = FW_CDEV_EVENT_REQUEST;
+ e->request.tcode = tcode;
+ e->request.offset = offset;
+ e->request.length = length;
+- e->request.handle = request->resource.handle;
++ e->request.handle = r->resource.handle;
+ e->request.closure = handler->closure;
+
+- queue_event(client, &e->event,
++ queue_event(handler->client, &e->event,
+ &e->request, sizeof(e->request), payload, length);
++ return;
++
++ failed:
++ kfree(r);
++ kfree(e);
++ fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+ }
+
+-static void
+-release_address_handler(struct client *client,
+- struct client_resource *resource)
++static void release_address_handler(struct client *client,
++ struct client_resource *resource)
+ {
+- struct address_handler *handler =
+- container_of(resource, struct address_handler, resource);
++ struct address_handler_resource *r =
++ container_of(resource, struct address_handler_resource, resource);
+
+- fw_core_remove_address_handler(&handler->handler);
+- kfree(handler);
++ fw_core_remove_address_handler(&r->handler);
++ kfree(r);
+ }
+
+ static int ioctl_allocate(struct client *client, void *buffer)
+ {
+ struct fw_cdev_allocate *request = buffer;
+- struct address_handler *handler;
++ struct address_handler_resource *r;
+ struct fw_address_region region;
++ int ret;
+
+- handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+- if (handler == NULL)
++ r = kmalloc(sizeof(*r), GFP_KERNEL);
++ if (r == NULL)
+ return -ENOMEM;
+
+ region.start = request->offset;
+ region.end = request->offset + request->length;
+- handler->handler.length = request->length;
+- handler->handler.address_callback = handle_request;
+- handler->handler.callback_data = handler;
+- handler->closure = request->closure;
+- handler->client = client;
+-
+- if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
+- kfree(handler);
+- return -EBUSY;
++ r->handler.length = request->length;
++ r->handler.address_callback = handle_request;
++ r->handler.callback_data = r;
++ r->closure = request->closure;
++ r->client = client;
++
++ ret = fw_core_add_address_handler(&r->handler, &region);
++ if (ret < 0) {
++ kfree(r);
++ return ret;
+ }
+
+- handler->resource.release = release_address_handler;
+- add_client_resource(client, &handler->resource);
+- request->handle = handler->resource.handle;
++ r->resource.release = release_address_handler;
++ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
++ if (ret < 0) {
++ release_address_handler(client, &r->resource);
++ return ret;
++ }
++ request->handle = r->resource.handle;
+
+ return 0;
+ }
+@@ -555,18 +692,22 @@ static int ioctl_deallocate(struct clien
+ {
+ struct fw_cdev_deallocate *request = buffer;
+
+- return release_client_resource(client, request->handle, NULL);
++ return release_client_resource(client, request->handle,
++ release_address_handler, NULL);
+ }
+
+ static int ioctl_send_response(struct client *client, void *buffer)
+ {
+ struct fw_cdev_send_response *request = buffer;
+ struct client_resource *resource;
+- struct request *r;
++ struct inbound_transaction_resource *r;
+
+- if (release_client_resource(client, request->handle, &resource) < 0)
++ if (release_client_resource(client, request->handle,
++ release_request, &resource) < 0)
+ return -EINVAL;
+- r = container_of(resource, struct request, resource);
++
++ r = container_of(resource, struct inbound_transaction_resource,
++ resource);
+ if (request->length < r->length)
+ r->length = request->length;
+ if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
+@@ -588,85 +729,84 @@ static int ioctl_initiate_bus_reset(stru
+ return fw_core_initiate_bus_reset(client->device->card, short_reset);
+ }
+
+-struct descriptor {
+- struct fw_descriptor d;
+- struct client_resource resource;
+- u32 data[0];
+-};
+-
+ static void release_descriptor(struct client *client,
+ struct client_resource *resource)
+ {
+- struct descriptor *descriptor =
+- container_of(resource, struct descriptor, resource);
++ struct descriptor_resource *r =
++ container_of(resource, struct descriptor_resource, resource);
+
+- fw_core_remove_descriptor(&descriptor->d);
+- kfree(descriptor);
++ fw_core_remove_descriptor(&r->descriptor);
++ kfree(r);
+ }
+
+ static int ioctl_add_descriptor(struct client *client, void *buffer)
+ {
+ struct fw_cdev_add_descriptor *request = buffer;
+- struct descriptor *descriptor;
+- int retval;
++ struct descriptor_resource *r;
++ int ret;
+
+ if (request->length > 256)
+ return -EINVAL;
+
+- descriptor =
+- kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
+- if (descriptor == NULL)
++ r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
++ if (r == NULL)
+ return -ENOMEM;
+
+- if (copy_from_user(descriptor->data,
++ if (copy_from_user(r->data,
+ u64_to_uptr(request->data), request->length * 4)) {
+- kfree(descriptor);
+- return -EFAULT;
++ ret = -EFAULT;
++ goto failed;
+ }
+
+- descriptor->d.length = request->length;
+- descriptor->d.immediate = request->immediate;
+- descriptor->d.key = request->key;
+- descriptor->d.data = descriptor->data;
+-
+- retval = fw_core_add_descriptor(&descriptor->d);
+- if (retval < 0) {
+- kfree(descriptor);
+- return retval;
++ r->descriptor.length = request->length;
++ r->descriptor.immediate = request->immediate;
++ r->descriptor.key = request->key;
++ r->descriptor.data = r->data;
++
++ ret = fw_core_add_descriptor(&r->descriptor);
++ if (ret < 0)
++ goto failed;
++
++ r->resource.release = release_descriptor;
++ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
++ if (ret < 0) {
++ fw_core_remove_descriptor(&r->descriptor);
++ goto failed;
+ }
+-
+- descriptor->resource.release = release_descriptor;
+- add_client_resource(client, &descriptor->resource);
+- request->handle = descriptor->resource.handle;
++ request->handle = r->resource.handle;
+
+ return 0;
++ failed:
++ kfree(r);
++
++ return ret;
+ }
+
+ static int ioctl_remove_descriptor(struct client *client, void *buffer)
+ {
+ struct fw_cdev_remove_descriptor *request = buffer;
+
+- return release_client_resource(client, request->handle, NULL);
++ return release_client_resource(client, request->handle,
++ release_descriptor, NULL);
+ }
+
+-static void
+-iso_callback(struct fw_iso_context *context, u32 cycle,
+- size_t header_length, void *header, void *data)
++static void iso_callback(struct fw_iso_context *context, u32 cycle,
++ size_t header_length, void *header, void *data)
+ {
+ struct client *client = data;
+- struct iso_interrupt *irq;
++ struct iso_interrupt_event *e;
+
+- irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
+- if (irq == NULL)
++ e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
++ if (e == NULL)
+ return;
+
+- irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
+- irq->interrupt.closure = client->iso_closure;
+- irq->interrupt.cycle = cycle;
+- irq->interrupt.header_length = header_length;
+- memcpy(irq->interrupt.header, header, header_length);
+- queue_event(client, &irq->event, &irq->interrupt,
+- sizeof(irq->interrupt) + header_length, NULL, 0);
++ e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
++ e->interrupt.closure = client->iso_closure;
++ e->interrupt.cycle = cycle;
++ e->interrupt.header_length = header_length;
++ memcpy(e->interrupt.header, header, header_length);
++ queue_event(client, &e->event, &e->interrupt,
++ sizeof(e->interrupt) + header_length, NULL, 0);
+ }
+
+ static int ioctl_create_iso_context(struct client *client, void *buffer)
+@@ -871,6 +1011,237 @@ static int ioctl_get_cycle_timer(struct
+ return 0;
+ }
+
++static void iso_resource_work(struct work_struct *work)
++{
++ struct iso_resource_event *e;
++ struct iso_resource *r =
++ container_of(work, struct iso_resource, work.work);
++ struct client *client = r->client;
++ int generation, channel, bandwidth, todo;
++ bool skip, free, success;
++
++ spin_lock_irq(&client->lock);
++ generation = client->device->generation;
++ todo = r->todo;
++ /* Allow 1000ms grace period for other reallocations. */
++ if (todo == ISO_RES_ALLOC &&
++ time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
++ if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
++ client_get(client);
++ skip = true;
++ } else {
++ /* We could be called twice within the same generation. */
++ skip = todo == ISO_RES_REALLOC &&
++ r->generation == generation;
++ }
++ free = todo == ISO_RES_DEALLOC ||
++ todo == ISO_RES_ALLOC_ONCE ||
++ todo == ISO_RES_DEALLOC_ONCE;
++ r->generation = generation;
++ spin_unlock_irq(&client->lock);
++
++ if (skip)
++ goto out;
++
++ bandwidth = r->bandwidth;
++
++ fw_iso_resource_manage(client->device->card, generation,
++ r->channels, &channel, &bandwidth,
++ todo == ISO_RES_ALLOC ||
++ todo == ISO_RES_REALLOC ||
++ todo == ISO_RES_ALLOC_ONCE);
++ /*
++ * Is this generation outdated already? As long as this resource sticks
++ * in the idr, it will be scheduled again for a newer generation or at
++ * shutdown.
++ */
++ if (channel == -EAGAIN &&
++ (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
++ goto out;
++
++ success = channel >= 0 || bandwidth > 0;
++
++ spin_lock_irq(&client->lock);
++ /*
++ * Transit from allocation to reallocation, except if the client
++ * requested deallocation in the meantime.
++ */
++ if (r->todo == ISO_RES_ALLOC)
++ r->todo = ISO_RES_REALLOC;
++ /*
++ * Allocation or reallocation failure? Pull this resource out of the
++ * idr and prepare for deletion, unless the client is shutting down.
++ */
++ if (r->todo == ISO_RES_REALLOC && !success &&
++ !client->in_shutdown &&
++ idr_find(&client->resource_idr, r->resource.handle)) {
++ idr_remove(&client->resource_idr, r->resource.handle);
++ client_put(client);
++ free = true;
++ }
++ spin_unlock_irq(&client->lock);
++
++ if (todo == ISO_RES_ALLOC && channel >= 0)
++ r->channels = 1ULL << channel;
++
++ if (todo == ISO_RES_REALLOC && success)
++ goto out;
++
++ if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
++ e = r->e_alloc;
++ r->e_alloc = NULL;
++ } else {
++ e = r->e_dealloc;
++ r->e_dealloc = NULL;
++ }
++ e->resource.handle = r->resource.handle;
++ e->resource.channel = channel;
++ e->resource.bandwidth = bandwidth;
++
++ queue_event(client, &e->event,
++ &e->resource, sizeof(e->resource), NULL, 0);
++
++ if (free) {
++ cancel_delayed_work(&r->work);
++ kfree(r->e_alloc);
++ kfree(r->e_dealloc);
++ kfree(r);
++ }
++ out:
++ client_put(client);
++}
++
++static void schedule_iso_resource(struct iso_resource *r)
++{
++ client_get(r->client);
++ if (!schedule_delayed_work(&r->work, 0))
++ client_put(r->client);
++}
++
++static void release_iso_resource(struct client *client,
++ struct client_resource *resource)
++{
++ struct iso_resource *r =
++ container_of(resource, struct iso_resource, resource);
++
++ spin_lock_irq(&client->lock);
++ r->todo = ISO_RES_DEALLOC;
++ schedule_iso_resource(r);
++ spin_unlock_irq(&client->lock);
++}
++
++static int init_iso_resource(struct client *client,
++ struct fw_cdev_allocate_iso_resource *request, int todo)
++{
++ struct iso_resource_event *e1, *e2;
++ struct iso_resource *r;
++ int ret;
++
++ if ((request->channels == 0 && request->bandwidth == 0) ||
++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
++ request->bandwidth < 0)
++ return -EINVAL;
++
++ r = kmalloc(sizeof(*r), GFP_KERNEL);
++ e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
++ e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
++ if (r == NULL || e1 == NULL || e2 == NULL) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++
++ INIT_DELAYED_WORK(&r->work, iso_resource_work);
++ r->client = client;
++ r->todo = todo;
++ r->generation = -1;
++ r->channels = request->channels;
++ r->bandwidth = request->bandwidth;
++ r->e_alloc = e1;
++ r->e_dealloc = e2;
++
++ e1->resource.closure = request->closure;
++ e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
++ e2->resource.closure = request->closure;
++ e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
++
++ if (todo == ISO_RES_ALLOC) {
++ r->resource.release = release_iso_resource;
++ ret = add_client_resource(client, &r->resource, GFP_KERNEL);
++ if (ret < 0)
++ goto fail;
++ } else {
++ r->resource.release = NULL;
++ r->resource.handle = -1;
++ schedule_iso_resource(r);
++ }
++ request->handle = r->resource.handle;
++
++ return 0;
++ fail:
++ kfree(r);
++ kfree(e1);
++ kfree(e2);
++
++ return ret;
++}
++
++static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
++{
++ struct fw_cdev_allocate_iso_resource *request = buffer;
++
++ return init_iso_resource(client, request, ISO_RES_ALLOC);
++}
++
++static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
++{
++ struct fw_cdev_deallocate *request = buffer;
++
++ return release_client_resource(client, request->handle,
++ release_iso_resource, NULL);
++}
++
++static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
++{
++ struct fw_cdev_allocate_iso_resource *request = buffer;
++
++ return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
++}
++
++static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
++{
++ struct fw_cdev_allocate_iso_resource *request = buffer;
++
++ return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
++}
++
++static int ioctl_get_speed(struct client *client, void *buffer)
++{
++ struct fw_cdev_get_speed *request = buffer;
++
++ request->max_speed = client->device->max_speed;
++
++ return 0;
++}
++
++static int ioctl_send_broadcast_request(struct client *client, void *buffer)
++{
++ struct fw_cdev_send_request *request = buffer;
++
++ switch (request->tcode) {
++ case TCODE_WRITE_QUADLET_REQUEST:
++ case TCODE_WRITE_BLOCK_REQUEST:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Security policy: Only allow accesses to Units Space. */
++ if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
++ return -EACCES;
++
++ return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
++}
++
+ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+ ioctl_get_info,
+ ioctl_send_request,
+@@ -885,13 +1256,19 @@ static int (* const ioctl_handlers[])(st
+ ioctl_start_iso,
+ ioctl_stop_iso,
+ ioctl_get_cycle_timer,
++ ioctl_allocate_iso_resource,
++ ioctl_deallocate_iso_resource,
++ ioctl_allocate_iso_resource_once,
++ ioctl_deallocate_iso_resource_once,
++ ioctl_get_speed,
++ ioctl_send_broadcast_request,
+ };
+
+-static int
+-dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
++static int dispatch_ioctl(struct client *client,
++ unsigned int cmd, void __user *arg)
+ {
+ char buffer[256];
+- int retval;
++ int ret;
+
+ if (_IOC_TYPE(cmd) != '#' ||
+ _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+@@ -903,9 +1280,9 @@ dispatch_ioctl(struct client *client, un
+ return -EFAULT;
+ }
+
+- retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+- if (retval < 0)
+- return retval;
++ ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
++ if (ret < 0)
++ return ret;
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+@@ -913,12 +1290,11 @@ dispatch_ioctl(struct client *client, un
+ return -EFAULT;
+ }
+
+- return retval;
++ return ret;
+ }
+
+-static long
+-fw_device_op_ioctl(struct file *file,
+- unsigned int cmd, unsigned long arg)
++static long fw_device_op_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
+ {
+ struct client *client = file->private_data;
+
+@@ -929,9 +1305,8 @@ fw_device_op_ioctl(struct file *file,
+ }
+
+ #ifdef CONFIG_COMPAT
+-static long
+-fw_device_op_compat_ioctl(struct file *file,
+- unsigned int cmd, unsigned long arg)
++static long fw_device_op_compat_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
+ {
+ struct client *client = file->private_data;
+
+@@ -947,7 +1322,7 @@ static int fw_device_op_mmap(struct file
+ struct client *client = file->private_data;
+ enum dma_data_direction direction;
+ unsigned long size;
+- int page_count, retval;
++ int page_count, ret;
+
+ if (fw_device_is_shutdown(client->device))
+ return -ENODEV;
+@@ -973,48 +1348,57 @@ static int fw_device_op_mmap(struct file
+ else
+ direction = DMA_FROM_DEVICE;
+
+- retval = fw_iso_buffer_init(&client->buffer, client->device->card,
+- page_count, direction);
+- if (retval < 0)
+- return retval;
++ ret = fw_iso_buffer_init(&client->buffer, client->device->card,
++ page_count, direction);
++ if (ret < 0)
++ return ret;
+
+- retval = fw_iso_buffer_map(&client->buffer, vma);
+- if (retval < 0)
++ ret = fw_iso_buffer_map(&client->buffer, vma);
++ if (ret < 0)
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+- return retval;
++ return ret;
++}
++
++static int shutdown_resource(int id, void *p, void *data)
++{
++ struct client_resource *r = p;
++ struct client *client = data;
++
++ r->release(client, r);
++ client_put(client);
++
++ return 0;
+ }
+
+ static int fw_device_op_release(struct inode *inode, struct file *file)
+ {
+ struct client *client = file->private_data;
+ struct event *e, *next_e;
+- struct client_resource *r, *next_r;
+- unsigned long flags;
+
+- if (client->buffer.pages)
+- fw_iso_buffer_destroy(&client->buffer, client->device->card);
++ mutex_lock(&client->device->client_list_mutex);
++ list_del(&client->link);
++ mutex_unlock(&client->device->client_list_mutex);
+
+ if (client->iso_context)
+ fw_iso_context_destroy(client->iso_context);
+
+- list_for_each_entry_safe(r, next_r, &client->resource_list, link)
+- r->release(client, r);
++ if (client->buffer.pages)
++ fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+- /*
+- * FIXME: We should wait for the async tasklets to stop
+- * running before freeing the memory.
+- */
++ /* Freeze client->resource_idr and client->event_list */
++ spin_lock_irq(&client->lock);
++ client->in_shutdown = true;
++ spin_unlock_irq(&client->lock);
++
++ idr_for_each(&client->resource_idr, shutdown_resource, client);
++ idr_remove_all(&client->resource_idr);
++ idr_destroy(&client->resource_idr);
+
+ list_for_each_entry_safe(e, next_e, &client->event_list, link)
+ kfree(e);
+
+- spin_lock_irqsave(&client->device->card->lock, flags);
+- list_del(&client->link);
+- spin_unlock_irqrestore(&client->device->card->lock, flags);
+-
+- fw_device_put(client->device);
+- kfree(client);
++ client_put(client);
+
+ return 0;
+ }
+diff -Naurp linux-2.6-git/drivers/firewire/fw-device.c firewire-git/drivers/firewire/fw-device.c
+--- linux-2.6-git/drivers/firewire/fw-device.c 2009-01-30 13:39:02.989651512 -0500
++++ firewire-git/drivers/firewire/fw-device.c 2009-01-30 13:35:51.860646788 -0500
+@@ -27,8 +27,10 @@
+ #include <linux/idr.h>
+ #include <linux/jiffies.h>
+ #include <linux/string.h>
++#include <linux/mutex.h>
+ #include <linux/rwsem.h>
+ #include <linux/semaphore.h>
++#include <linux/spinlock.h>
+ #include <asm/system.h>
+ #include <linux/ctype.h>
+ #include "fw-transaction.h"
+@@ -132,8 +134,7 @@ static int get_modalias(struct fw_unit *
+ vendor, model, specifier_id, version);
+ }
+
+-static int
+-fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
++static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+ {
+ struct fw_unit *unit = fw_unit(dev);
+ char modalias[64];
+@@ -191,8 +192,8 @@ struct config_rom_attribute {
+ u32 key;
+ };
+
+-static ssize_t
+-show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
++static ssize_t show_immediate(struct device *dev,
++ struct device_attribute *dattr, char *buf)
+ {
+ struct config_rom_attribute *attr =
+ container_of(dattr, struct config_rom_attribute, attr);
+@@ -223,8 +224,8 @@ show_immediate(struct device *dev, struc
+ #define IMMEDIATE_ATTR(name, key) \
+ { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
+
+-static ssize_t
+-show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
++static ssize_t show_text_leaf(struct device *dev,
++ struct device_attribute *dattr, char *buf)
+ {
+ struct config_rom_attribute *attr =
+ container_of(dattr, struct config_rom_attribute, attr);
+@@ -293,10 +294,9 @@ static struct config_rom_attribute confi
+ TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
+ };
+
+-static void
+-init_fw_attribute_group(struct device *dev,
+- struct device_attribute *attrs,
+- struct fw_attribute_group *group)
++static void init_fw_attribute_group(struct device *dev,
++ struct device_attribute *attrs,
++ struct fw_attribute_group *group)
+ {
+ struct device_attribute *attr;
+ int i, j;
+@@ -319,9 +319,8 @@ init_fw_attribute_group(struct device *d
+ dev->groups = group->groups;
+ }
+
+-static ssize_t
+-modalias_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
++static ssize_t modalias_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+ struct fw_unit *unit = fw_unit(dev);
+ int length;
+@@ -332,9 +331,8 @@ modalias_show(struct device *dev,
+ return length + 1;
+ }
+
+-static ssize_t
+-rom_index_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
++static ssize_t rom_index_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+ struct fw_device *device = fw_device(dev->parent);
+ struct fw_unit *unit = fw_unit(dev);
+@@ -349,8 +347,8 @@ static struct device_attribute fw_unit_a
+ __ATTR_NULL,
+ };
+
+-static ssize_t
+-config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t config_rom_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+ struct fw_device *device = fw_device(dev);
+ size_t length;
+@@ -363,8 +361,8 @@ config_rom_show(struct device *dev, stru
+ return length;
+ }
+
+-static ssize_t
+-guid_show(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t guid_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+ struct fw_device *device = fw_device(dev);
+ int ret;
+@@ -383,8 +381,8 @@ static struct device_attribute fw_device
+ __ATTR_NULL,
+ };
+
+-static int
+-read_rom(struct fw_device *device, int generation, int index, u32 *data)
++static int read_rom(struct fw_device *device,
++ int generation, int index, u32 *data)
+ {
+ int rcode;
+
+@@ -1004,6 +1002,7 @@ void fw_node_event(struct fw_card *card,
+ device->node = fw_node_get(node);
+ device->node_id = node->node_id;
+ device->generation = card->generation;
++ mutex_init(&device->client_list_mutex);
+ INIT_LIST_HEAD(&device->client_list);
+
+ /*
+diff -Naurp linux-2.6-git/drivers/firewire/fw-device.h firewire-git/drivers/firewire/fw-device.h
+--- linux-2.6-git/drivers/firewire/fw-device.h 2009-01-30 13:39:02.989651512 -0500
++++ firewire-git/drivers/firewire/fw-device.h 2009-01-30 13:35:51.860646788 -0500
+@@ -23,6 +23,7 @@
+ #include <linux/cdev.h>
+ #include <linux/idr.h>
+ #include <linux/rwsem.h>
++#include <linux/mutex.h>
+ #include <asm/atomic.h>
+
+ enum fw_device_state {
+@@ -64,7 +65,10 @@ struct fw_device {
+ bool cmc;
+ struct fw_card *card;
+ struct device device;
++
++ struct mutex client_list_mutex;
+ struct list_head client_list;
++
+ u32 *config_rom;
+ size_t config_rom_length;
+ int config_rom_retries;
+@@ -176,8 +180,7 @@ struct fw_driver {
+ const struct fw_device_id *id_table;
+ };
+
+-static inline struct fw_driver *
+-fw_driver(struct device_driver *drv)
++static inline struct fw_driver *fw_driver(struct device_driver *drv)
+ {
+ return container_of(drv, struct fw_driver, driver);
+ }
+diff -Naurp linux-2.6-git/drivers/firewire/fw-iso.c firewire-git/drivers/firewire/fw-iso.c
+--- linux-2.6-git/drivers/firewire/fw-iso.c 2008-11-04 11:18:33.000000000 -0500
++++ firewire-git/drivers/firewire/fw-iso.c 2009-01-30 13:35:51.860646788 -0500
+@@ -1,5 +1,7 @@
+ /*
+- * Isochronous IO functionality
++ * Isochronous I/O functionality:
++ * - Isochronous DMA context management
++ * - Isochronous bus resource management (channels, bandwidth), client side
+ *
+ * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+@@ -18,21 +20,25 @@
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+ #include <linux/dma-mapping.h>
+-#include <linux/vmalloc.h>
++#include <linux/errno.h>
++#include <linux/firewire-constants.h>
++#include <linux/kernel.h>
+ #include <linux/mm.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
+
+-#include "fw-transaction.h"
+ #include "fw-topology.h"
+-#include "fw-device.h"
++#include "fw-transaction.h"
+
+-int
+-fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+- int page_count, enum dma_data_direction direction)
++/*
++ * Isochronous DMA context management
++ */
++
++int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
++ int page_count, enum dma_data_direction direction)
+ {
+- int i, j, retval = -ENOMEM;
++ int i, j;
+ dma_addr_t address;
+
+ buffer->page_count = page_count;
+@@ -69,19 +75,19 @@ fw_iso_buffer_init(struct fw_iso_buffer
+ kfree(buffer->pages);
+ out:
+ buffer->pages = NULL;
+- return retval;
++ return -ENOMEM;
+ }
+
+ int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+ {
+ unsigned long uaddr;
+- int i, retval;
++ int i, ret;
+
+ uaddr = vma->vm_start;
+ for (i = 0; i < buffer->page_count; i++) {
+- retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
+- if (retval)
+- return retval;
++ ret = vm_insert_page(vma, uaddr, buffer->pages[i]);
++ if (ret)
++ return ret;
+ uaddr += PAGE_SIZE;
+ }
+
+@@ -105,14 +111,14 @@ void fw_iso_buffer_destroy(struct fw_iso
+ buffer->pages = NULL;
+ }
+
+-struct fw_iso_context *
+-fw_iso_context_create(struct fw_card *card, int type,
+- int channel, int speed, size_t header_size,
+- fw_iso_callback_t callback, void *callback_data)
++struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
++ int type, int channel, int speed, size_t header_size,
++ fw_iso_callback_t callback, void *callback_data)
+ {
+ struct fw_iso_context *ctx;
+
+- ctx = card->driver->allocate_iso_context(card, type, header_size);
++ ctx = card->driver->allocate_iso_context(card,
++ type, channel, header_size);
+ if (IS_ERR(ctx))
+ return ctx;
+
+@@ -134,25 +140,186 @@ void fw_iso_context_destroy(struct fw_is
+ card->driver->free_iso_context(ctx);
+ }
+
+-int
+-fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
++int fw_iso_context_start(struct fw_iso_context *ctx,
++ int cycle, int sync, int tags)
+ {
+ return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
+ }
+
+-int
+-fw_iso_context_queue(struct fw_iso_context *ctx,
+- struct fw_iso_packet *packet,
+- struct fw_iso_buffer *buffer,
+- unsigned long payload)
++int fw_iso_context_queue(struct fw_iso_context *ctx,
++ struct fw_iso_packet *packet,
++ struct fw_iso_buffer *buffer,
++ unsigned long payload)
+ {
+ struct fw_card *card = ctx->card;
+
+ return card->driver->queue_iso(ctx, packet, buffer, payload);
+ }
+
+-int
+-fw_iso_context_stop(struct fw_iso_context *ctx)
++int fw_iso_context_stop(struct fw_iso_context *ctx)
+ {
+ return ctx->card->driver->stop_iso(ctx);
+ }
++
++/*
++ * Isochronous bus resource management (channels, bandwidth), client side
++ */
++
++static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
++ int bandwidth, bool allocate)
++{
++ __be32 data[2];
++ int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
++
++ /*
++ * On a 1394a IRM with low contention, try < 1 is enough.
++ * On a 1394-1995 IRM, we need at least try < 2.
++ * Let's just do try < 5.
++ */
++ for (try = 0; try < 5; try++) {
++ new = allocate ? old - bandwidth : old + bandwidth;
++ if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
++ break;
++
++ data[0] = cpu_to_be32(old);
++ data[1] = cpu_to_be32(new);
++ switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
++ irm_id, generation, SCODE_100,
++ CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
++ data, sizeof(data))) {
++ case RCODE_GENERATION:
++ /* A generation change frees all bandwidth. */
++ return allocate ? -EAGAIN : bandwidth;
++
++ case RCODE_COMPLETE:
++ if (be32_to_cpup(data) == old)
++ return bandwidth;
++
++ old = be32_to_cpup(data);
++ /* Fall through. */
++ }
++ }
++
++ return -EIO;
++}
++
++static int manage_channel(struct fw_card *card, int irm_id, int generation,
++ u32 channels_mask, u64 offset, bool allocate)
++{
++ __be32 data[2], c, all, old;
++ int i, retry = 5;
++
++ old = all = allocate ? cpu_to_be32(~0) : 0;
++
++ for (i = 0; i < 32; i++) {
++ if (!(channels_mask & 1 << i))
++ continue;
++
++ c = cpu_to_be32(1 << (31 - i));
++ if ((old & c) != (all & c))
++ continue;
++
++ data[0] = old;
++ data[1] = old ^ c;
++ switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
++ irm_id, generation, SCODE_100,
++ offset, data, sizeof(data))) {
++ case RCODE_GENERATION:
++ /* A generation change frees all channels. */
++ return allocate ? -EAGAIN : i;
++
++ case RCODE_COMPLETE:
++ if (data[0] == old)
++ return i;
++
++ old = data[0];
++
++ /* Is the IRM 1394a-2000 compliant? */
++ if ((data[0] & c) == (data[1] & c))
++ continue;
++
++ /* 1394-1995 IRM, fall through to retry. */
++ default:
++ if (retry--)
++ i--;
++ }
++ }
++
++ return -EIO;
++}
++
++static void deallocate_channel(struct fw_card *card, int irm_id,
++ int generation, int channel)
++{
++ u32 mask;
++ u64 offset;
++
++ mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
++ offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
++ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
++
++ manage_channel(card, irm_id, generation, mask, offset, false);
++}
++
++/**
++ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
++ *
++ * In parameters: card, generation, channels_mask, bandwidth, allocate
++ * Out parameters: channel, bandwidth
++ * This function blocks (sleeps) during communication with the IRM.
++ *
++ * Allocates or deallocates at most one channel out of channels_mask.
++ * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
++ * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
++ * channel 0 and LSB for channel 63.)
++ * Allocates or deallocates as many bandwidth allocation units as specified.
++ *
++ * Returns channel < 0 if no channel was allocated or deallocated.
++ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
++ *
++ * If generation is stale, deallocations succeed but allocations fail with
++ * channel = -EAGAIN.
++ *
++ * If channel allocation fails, no bandwidth will be allocated either.
++ * If bandwidth allocation fails, no channel will be allocated either.
++ * But deallocations of channel and bandwidth are tried independently
++ * of each other's success.
++ */
++void fw_iso_resource_manage(struct fw_card *card, int generation,
++ u64 channels_mask, int *channel, int *bandwidth,
++ bool allocate)
++{
++ u32 channels_hi = channels_mask; /* channels 31...0 */
++ u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
++ int irm_id, ret, c = -EINVAL;
++
++ spin_lock_irq(&card->lock);
++ irm_id = card->irm_node->node_id;
++ spin_unlock_irq(&card->lock);
++
++ if (channels_hi)
++ c = manage_channel(card, irm_id, generation, channels_hi,
++ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
++ if (channels_lo && c < 0) {
++ c = manage_channel(card, irm_id, generation, channels_lo,
++ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
++ if (c >= 0)
++ c += 32;
++ }
++ *channel = c;
++
++ if (allocate && channels_mask != 0 && c < 0)
++ *bandwidth = 0;
++
++ if (*bandwidth == 0)
++ return;
++
++ ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
++ if (ret < 0)
++ *bandwidth = 0;
++
++ if (allocate && ret < 0 && c >= 0) {
++ deallocate_channel(card, irm_id, generation, c);
++ *channel = ret;
++ }
++}
+diff -Naurp linux-2.6-git/drivers/firewire/fw-ohci.c firewire-git/drivers/firewire/fw-ohci.c
+--- linux-2.6-git/drivers/firewire/fw-ohci.c 2009-01-30 13:39:02.990772025 -0500
++++ firewire-git/drivers/firewire/fw-ohci.c 2009-01-30 13:35:51.861646907 -0500
+@@ -205,6 +205,7 @@ struct fw_ohci {
+
+ u32 it_context_mask;
+ struct iso_context *it_context_list;
++ u64 ir_context_channels;
+ u32 ir_context_mask;
+ struct iso_context *ir_context_list;
+ };
+@@ -441,9 +442,8 @@ static inline void flush_writes(const st
+ reg_read(ohci, OHCI1394_Version);
+ }
+
+-static int
+-ohci_update_phy_reg(struct fw_card *card, int addr,
+- int clear_bits, int set_bits)
++static int ohci_update_phy_reg(struct fw_card *card, int addr,
++ int clear_bits, int set_bits)
+ {
+ struct fw_ohci *ohci = fw_ohci(card);
+ u32 val, old;
+@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned
+ }
+ }
+
+-static int
+-ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
++static int ar_context_init(struct ar_context *ctx,
++ struct fw_ohci *ohci, u32 regs)
+ {
+ struct ar_buffer ab;
+
+@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_con
+ flush_writes(ctx->ohci);
+ }
+
+-static struct descriptor *
+-find_branch_descriptor(struct descriptor *d, int z)
++static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
+ {
+ int b, key;
+
+@@ -751,8 +750,7 @@ static void context_tasklet(unsigned lon
+ * Allocate a new buffer and add it to the list of free buffers for this
+ * context. Must be called with ohci->lock held.
+ */
+-static int
+-context_add_buffer(struct context *ctx)
++static int context_add_buffer(struct context *ctx)
+ {
+ struct descriptor_buffer *desc;
+ dma_addr_t uninitialized_var(bus_addr);
+@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
+ return 0;
+ }
+
+-static int
+-context_init(struct context *ctx, struct fw_ohci *ohci,
+- u32 regs, descriptor_callback_t callback)
++static int context_init(struct context *ctx, struct fw_ohci *ohci,
++ u32 regs, descriptor_callback_t callback)
+ {
+ ctx->ohci = ohci;
+ ctx->regs = regs;
+@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct
+ return 0;
+ }
+
+-static void
+-context_release(struct context *ctx)
++static void context_release(struct context *ctx)
+ {
+ struct fw_card *card = &ctx->ohci->card;
+ struct descriptor_buffer *desc, *tmp;
+@@ -827,8 +823,8 @@ context_release(struct context *ctx)
+ }
+
+ /* Must be called with ohci->lock held */
+-static struct descriptor *
+-context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
++static struct descriptor *context_get_descriptors(struct context *ctx,
++ int z, dma_addr_t *d_bus)
+ {
+ struct descriptor *d = NULL;
+ struct descriptor_buffer *desc = ctx->buffer_tail;
+@@ -912,8 +908,8 @@ struct driver_data {
+ * Must always be called with the ochi->lock held to ensure proper
+ * generation handling and locking around packet queue manipulation.
+ */
+-static int
+-at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
++static int at_context_queue_packet(struct context *ctx,
++ struct fw_packet *packet)
+ {
+ struct fw_ohci *ohci = ctx->ohci;
+ dma_addr_t d_bus, uninitialized_var(payload_bus);
+@@ -1095,8 +1091,8 @@ static int handle_at_packet(struct conte
+ #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+ #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+
+-static void
+-handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
++static void handle_local_rom(struct fw_ohci *ohci,
++ struct fw_packet *packet, u32 csr)
+ {
+ struct fw_packet response;
+ int tcode, length, i;
+@@ -1122,8 +1118,8 @@ handle_local_rom(struct fw_ohci *ohci, s
+ fw_core_handle_response(&ohci->card, &response);
+ }
+
+-static void
+-handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
++static void handle_local_lock(struct fw_ohci *ohci,
++ struct fw_packet *packet, u32 csr)
+ {
+ struct fw_packet response;
+ int tcode, length, ext_tcode, sel;
+@@ -1164,8 +1160,7 @@ handle_local_lock(struct fw_ohci *ohci,
+ fw_core_handle_response(&ohci->card, &response);
+ }
+
+-static void
+-handle_local_request(struct context *ctx, struct fw_packet *packet)
++static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+ {
+ u64 offset;
+ u32 csr;
+@@ -1205,11 +1200,10 @@ handle_local_request(struct context *ctx
+ }
+ }
+
+-static void
+-at_context_transmit(struct context *ctx, struct fw_packet *packet)
++static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+ {
+ unsigned long flags;
+- int retval;
++ int ret;
+
+ spin_lock_irqsave(&ctx->ohci->lock, flags);
+
+@@ -1220,10 +1214,10 @@ at_context_transmit(struct context *ctx,
+ return;
+ }
+
+- retval = at_context_queue_packet(ctx, packet);
++ ret = at_context_queue_packet(ctx, packet);
+ spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+
+- if (retval < 0)
++ if (ret < 0)
+ packet->callback(packet, &ctx->ohci->card, packet->ack);
+
+ }
+@@ -1590,12 +1584,12 @@ static int ohci_enable(struct fw_card *c
+ return 0;
+ }
+
+-static int
+-ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
++static int ohci_set_config_rom(struct fw_card *card,
++ u32 *config_rom, size_t length)
+ {
+ struct fw_ohci *ohci;
+ unsigned long flags;
+- int retval = -EBUSY;
++ int ret = -EBUSY;
+ __be32 *next_config_rom;
+ dma_addr_t uninitialized_var(next_config_rom_bus);
+
+@@ -1649,7 +1643,7 @@ ohci_set_config_rom(struct fw_card *card
+
+ reg_write(ohci, OHCI1394_ConfigROMmap,
+ ohci->next_config_rom_bus);
+- retval = 0;
++ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+@@ -1661,13 +1655,13 @@ ohci_set_config_rom(struct fw_card *card
+ * controller could need to access it before the bus reset
+ * takes effect.
+ */
+- if (retval == 0)
++ if (ret == 0)
+ fw_core_initiate_bus_reset(&ohci->card, 1);
+ else
+ dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ next_config_rom, next_config_rom_bus);
+
+- return retval;
++ return ret;
+ }
+
+ static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
+@@ -1689,7 +1683,7 @@ static int ohci_cancel_packet(struct fw_
+ struct fw_ohci *ohci = fw_ohci(card);
+ struct context *ctx = &ohci->at_request_ctx;
+ struct driver_data *driver_data = packet->driver_data;
+- int retval = -ENOENT;
++ int ret = -ENOENT;
+
+ tasklet_disable(&ctx->tasklet);
+
+@@ -1704,23 +1698,22 @@ static int ohci_cancel_packet(struct fw_
+ driver_data->packet = NULL;
+ packet->ack = RCODE_CANCELLED;
+ packet->callback(packet, &ohci->card, packet->ack);
+- retval = 0;
+-
++ ret = 0;
+ out:
+ tasklet_enable(&ctx->tasklet);
+
+- return retval;
++ return ret;
+ }
+
+-static int
+-ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
++static int ohci_enable_phys_dma(struct fw_card *card,
++ int node_id, int generation)
+ {
+ #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+ return 0;
+ #else
+ struct fw_ohci *ohci = fw_ohci(card);
+ unsigned long flags;
+- int n, retval = 0;
++ int n, ret = 0;
+
+ /*
+ * FIXME: Make sure this bitmask is cleared when we clear the busReset
+@@ -1730,7 +1723,7 @@ ohci_enable_phys_dma(struct fw_card *car
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ if (ohci->generation != generation) {
+- retval = -ESTALE;
++ ret = -ESTALE;
+ goto out;
+ }
+
+@@ -1748,12 +1741,12 @@ ohci_enable_phys_dma(struct fw_card *car
+ flush_writes(ohci);
+ out:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+- return retval;
++
++ return ret;
+ #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
+ }
+
+-static u64
+-ohci_get_bus_time(struct fw_card *card)
++static u64 ohci_get_bus_time(struct fw_card *card)
+ {
+ struct fw_ohci *ohci = fw_ohci(card);
+ u32 cycle_time;
+@@ -1765,6 +1758,28 @@ ohci_get_bus_time(struct fw_card *card)
+ return bus_time;
+ }
+
++static void copy_iso_headers(struct iso_context *ctx, void *p)
++{
++ int i = ctx->header_length;
++
++ if (i + ctx->base.header_size > PAGE_SIZE)
++ return;
++
++ /*
++ * The iso header is byteswapped to little endian by
++ * the controller, but the remaining header quadlets
++ * are big endian. We want to present all the headers
++ * as big endian, so we have to swap the first quadlet.
++ */
++ if (ctx->base.header_size > 0)
++ *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
++ if (ctx->base.header_size > 4)
++ *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
++ if (ctx->base.header_size > 8)
++ memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
++ ctx->header_length += ctx->base.header_size;
++}
++
+ static int handle_ir_dualbuffer_packet(struct context *context,
+ struct descriptor *d,
+ struct descriptor *last)
+@@ -1775,7 +1790,6 @@ static int handle_ir_dualbuffer_packet(s
+ __le32 *ir_header;
+ size_t header_length;
+ void *p, *end;
+- int i;
+
+ if (db->first_res_count != 0 && db->second_res_count != 0) {
+ if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
+@@ -1788,25 +1802,14 @@ static int handle_ir_dualbuffer_packet(s
+ header_length = le16_to_cpu(db->first_req_count) -
+ le16_to_cpu(db->first_res_count);
+
+- i = ctx->header_length;
+ p = db + 1;
+ end = p + header_length;
+- while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
+- /*
+- * The iso header is byteswapped to little endian by
+- * the controller, but the remaining header quadlets
+- * are big endian. We want to present all the headers
+- * as big endian, so we have to swap the first
+- * quadlet.
+- */
+- *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+- memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
+- i += ctx->base.header_size;
++ while (p < end) {
++ copy_iso_headers(ctx, p);
+ ctx->excess_bytes +=
+ (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
+- p += ctx->base.header_size + 4;
++ p += max(ctx->base.header_size, (size_t)8);
+ }
+- ctx->header_length = i;
+
+ ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
+ le16_to_cpu(db->second_res_count);
+@@ -1832,7 +1835,6 @@ static int handle_ir_packet_per_buffer(s
+ struct descriptor *pd;
+ __le32 *ir_header;
+ void *p;
+- int i;
+
+ for (pd = d; pd <= last; pd++) {
+ if (pd->transfer_status)
+@@ -1842,21 +1844,8 @@ static int handle_ir_packet_per_buffer(s
+ /* Descriptor(s) not done yet, stop iteration */
+ return 0;
+
+- i = ctx->header_length;
+- p = last + 1;
+-
+- if (ctx->base.header_size > 0 &&
+- i + ctx->base.header_size <= PAGE_SIZE) {
+- /*
+- * The iso header is byteswapped to little endian by
+- * the controller, but the remaining header quadlets
+- * are big endian. We want to present all the headers
+- * as big endian, so we have to swap the first quadlet.
+- */
+- *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+- memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
+- ctx->header_length += ctx->base.header_size;
+- }
++ p = last + 1;
++ copy_iso_headers(ctx, p);
+
+ if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
+ ir_header = (__le32 *) p;
+@@ -1888,21 +1877,24 @@ static int handle_it_packet(struct conte
+ return 1;
+ }
+
+-static struct fw_iso_context *
+-ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
++static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
++ int type, int channel, size_t header_size)
+ {
+ struct fw_ohci *ohci = fw_ohci(card);
+ struct iso_context *ctx, *list;
+ descriptor_callback_t callback;
++ u64 *channels, dont_care = ~0ULL;
+ u32 *mask, regs;
+ unsigned long flags;
+- int index, retval = -ENOMEM;
++ int index, ret = -ENOMEM;
+
+ if (type == FW_ISO_CONTEXT_TRANSMIT) {
++ channels = &dont_care;
+ mask = &ohci->it_context_mask;
+ list = ohci->it_context_list;
+ callback = handle_it_packet;
+ } else {
++ channels = &ohci->ir_context_channels;
+ mask = &ohci->ir_context_mask;
+ list = ohci->ir_context_list;
+ if (ohci->use_dualbuffer)
+@@ -1912,9 +1904,11 @@ ohci_allocate_iso_context(struct fw_card
+ }
+
+ spin_lock_irqsave(&ohci->lock, flags);
+- index = ffs(*mask) - 1;
+- if (index >= 0)
++ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
++ if (index >= 0) {
++ *channels &= ~(1ULL << channel);
+ *mask &= ~(1 << index);
++ }
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ if (index < 0)
+@@ -1932,8 +1926,8 @@ ohci_allocate_iso_context(struct fw_card
+ if (ctx->header == NULL)
+ goto out;
+
+- retval = context_init(&ctx->context, ohci, regs, callback);
+- if (retval < 0)
++ ret = context_init(&ctx->context, ohci, regs, callback);
++ if (ret < 0)
+ goto out_with_header;
+
+ return &ctx->base;
+@@ -1945,7 +1939,7 @@ ohci_allocate_iso_context(struct fw_card
+ *mask |= 1 << index;
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+- return ERR_PTR(retval);
++ return ERR_PTR(ret);
+ }
+
+ static int ohci_start_iso(struct fw_iso_context *base,
+@@ -2024,16 +2018,16 @@ static void ohci_free_iso_context(struct
+ } else {
+ index = ctx - ohci->ir_context_list;
+ ohci->ir_context_mask |= 1 << index;
++ ohci->ir_context_channels |= 1ULL << base->channel;
+ }
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+ }
+
+-static int
+-ohci_queue_iso_transmit(struct fw_iso_context *base,
+- struct fw_iso_packet *packet,
+- struct fw_iso_buffer *buffer,
+- unsigned long payload)
++static int ohci_queue_iso_transmit(struct fw_iso_context *base,
++ struct fw_iso_packet *packet,
++ struct fw_iso_buffer *buffer,
++ unsigned long payload)
+ {
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ struct descriptor *d, *last, *pd;
+@@ -2128,11 +2122,10 @@ ohci_queue_iso_transmit(struct fw_iso_co
+ return 0;
+ }
+
+-static int
+-ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+- struct fw_iso_packet *packet,
+- struct fw_iso_buffer *buffer,
+- unsigned long payload)
++static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
++ struct fw_iso_packet *packet,
++ struct fw_iso_buffer *buffer,
++ unsigned long payload)
+ {
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ struct db_descriptor *db = NULL;
+@@ -2151,11 +2144,11 @@ ohci_queue_iso_receive_dualbuffer(struct
+ z = 2;
+
+ /*
+- * The OHCI controller puts the status word in the header
+- * buffer too, so we need 4 extra bytes per packet.
++ * The OHCI controller puts the isochronous header and trailer in the
++ * buffer, so we need at least 8 bytes.
+ */
+ packet_count = p->header_length / ctx->base.header_size;
+- header_size = packet_count * (ctx->base.header_size + 4);
++ header_size = packet_count * max(ctx->base.header_size, (size_t)8);
+
+ /* Get header size in number of descriptors. */
+ header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+@@ -2173,7 +2166,8 @@ ohci_queue_iso_receive_dualbuffer(struct
+ db = (struct db_descriptor *) d;
+ db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+ DESCRIPTOR_BRANCH_ALWAYS);
+- db->first_size = cpu_to_le16(ctx->base.header_size + 4);
++ db->first_size =
++ cpu_to_le16(max(ctx->base.header_size, (size_t)8));
+ if (p->skip && rest == p->payload_length) {
+ db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+ db->first_req_count = db->first_size;
+@@ -2208,11 +2202,10 @@ ohci_queue_iso_receive_dualbuffer(struct
+ return 0;
+ }
+
+-static int
+-ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+- struct fw_iso_packet *packet,
+- struct fw_iso_buffer *buffer,
+- unsigned long payload)
++static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
++ struct fw_iso_packet *packet,
++ struct fw_iso_buffer *buffer,
++ unsigned long payload)
+ {
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ struct descriptor *d = NULL, *pd = NULL;
+@@ -2223,11 +2216,11 @@ ohci_queue_iso_receive_packet_per_buffer
+ int page, offset, packet_count, header_size, payload_per_buffer;
+
+ /*
+- * The OHCI controller puts the status word in the
+- * buffer too, so we need 4 extra bytes per packet.
++ * The OHCI controller puts the isochronous header and trailer in the
++ * buffer, so we need at least 8 bytes.
+ */
+ packet_count = p->header_length / ctx->base.header_size;
+- header_size = ctx->base.header_size + 4;
++ header_size = max(ctx->base.header_size, (size_t)8);
+
+ /* Get header size in number of descriptors. */
+ header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+@@ -2286,29 +2279,27 @@ ohci_queue_iso_receive_packet_per_buffer
+ return 0;
+ }
+
+-static int
+-ohci_queue_iso(struct fw_iso_context *base,
+- struct fw_iso_packet *packet,
+- struct fw_iso_buffer *buffer,
+- unsigned long payload)
++static int ohci_queue_iso(struct fw_iso_context *base,
++ struct fw_iso_packet *packet,
++ struct fw_iso_buffer *buffer,
++ unsigned long payload)
+ {
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ unsigned long flags;
+- int retval;
++ int ret;
+
+ spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+ if (base->type == FW_ISO_CONTEXT_TRANSMIT)
+- retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
++ ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
+ else if (ctx->context.ohci->use_dualbuffer)
+- retval = ohci_queue_iso_receive_dualbuffer(base, packet,
+- buffer, payload);
++ ret = ohci_queue_iso_receive_dualbuffer(base, packet,
++ buffer, payload);
+ else
+- retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+- buffer,
+- payload);
++ ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
++ buffer, payload);
+ spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
+
+- return retval;
++ return ret;
+ }
+
+ static const struct fw_card_driver ohci_driver = {
+@@ -2357,8 +2348,8 @@ static void ohci_pmac_off(struct pci_dev
+ #define ohci_pmac_off(dev)
+ #endif /* CONFIG_PPC_PMAC */
+
+-static int __devinit
+-pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
++static int __devinit pci_probe(struct pci_dev *dev,
++ const struct pci_device_id *ent)
+ {
+ struct fw_ohci *ohci;
+ u32 bus_options, max_receive, link_speed, version;
+@@ -2440,6 +2431,7 @@ pci_probe(struct pci_dev *dev, const str
+ ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
++ ohci->ir_context_channels = ~0ULL;
+ ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
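An aside on the header-size arithmetic changed in the two receive paths above: the sketch below is illustration only (the helper names are invented, not from the driver) and merely restates the before/after per-packet reservation, where the new formula accounts for the controller writing both the isochronous header and the trailer into the header buffer.

#include <linux/kernel.h>	/* for max() */

/* Bytes reserved in the header buffer per received packet. */
static size_t per_packet_before(size_t client_header_size)
{
	return client_header_size + 4;		/* header + 4-byte status word */
}

static size_t per_packet_after(size_t client_header_size)
{
	/* header + trailer, never less than 8 bytes per packet */
	return max(client_header_size, (size_t)8);
}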
+diff -Naurp linux-2.6-git/drivers/firewire/fw-sbp2.c firewire-git/drivers/firewire/fw-sbp2.c
+--- linux-2.6-git/drivers/firewire/fw-sbp2.c 2009-01-30 13:39:02.991771976 -0500
++++ firewire-git/drivers/firewire/fw-sbp2.c 2009-01-30 13:35:51.861646907 -0500
+@@ -392,20 +392,18 @@ static const struct {
+ }
+ };
+
+-static void
+-free_orb(struct kref *kref)
++static void free_orb(struct kref *kref)
+ {
+ struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
+
+ kfree(orb);
+ }
+
+-static void
+-sbp2_status_write(struct fw_card *card, struct fw_request *request,
+- int tcode, int destination, int source,
+- int generation, int speed,
+- unsigned long long offset,
+- void *payload, size_t length, void *callback_data)
++static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
++ int tcode, int destination, int source,
++ int generation, int speed,
++ unsigned long long offset,
++ void *payload, size_t length, void *callback_data)
+ {
+ struct sbp2_logical_unit *lu = callback_data;
+ struct sbp2_orb *orb;
+@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card,
+ fw_send_response(card, request, RCODE_COMPLETE);
+ }
+
+-static void
+-complete_transaction(struct fw_card *card, int rcode,
+- void *payload, size_t length, void *data)
++static void complete_transaction(struct fw_card *card, int rcode,
++ void *payload, size_t length, void *data)
+ {
+ struct sbp2_orb *orb = data;
+ unsigned long flags;
+@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *car
+ kref_put(&orb->kref, free_orb);
+ }
+
+-static void
+-sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
+- int node_id, int generation, u64 offset)
++static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
++ int node_id, int generation, u64 offset)
+ {
+ struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ unsigned long flags;
+@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_
+ return retval;
+ }
+
+-static void
+-complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
++static void complete_management_orb(struct sbp2_orb *base_orb,
++ struct sbp2_status *status)
+ {
+ struct sbp2_management_orb *orb =
+ container_of(base_orb, struct sbp2_management_orb, base);
+@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb
+ complete(&orb->done);
+ }
+
+-static int
+-sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+- int generation, int function, int lun_or_login_id,
+- void *response)
++static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
++ int generation, int function,
++ int lun_or_login_id, void *response)
+ {
+ struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
+ struct sbp2_management_orb *orb;
+@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2
+ &d, sizeof(d));
+ }
+
+-static void
+-complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
+- void *payload, size_t length, void *data)
++static void complete_agent_reset_write_no_wait(struct fw_card *card,
++ int rcode, void *payload, size_t length, void *data)
+ {
+ kfree(data);
+ }
+@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struc
+ sizeof(orb->page_table), DMA_TO_DEVICE);
+ }
+
+-static unsigned int
+-sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
++static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+ {
+ int sam_status;
+
+@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_statu
+ }
+ }
+
+-static void
+-complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
++static void complete_command_orb(struct sbp2_orb *base_orb,
++ struct sbp2_status *status)
+ {
+ struct sbp2_command_orb *orb =
+ container_of(base_orb, struct sbp2_command_orb, base);
+@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *ba
+ orb->done(orb->cmd);
+ }
+
+-static int
+-sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
+- struct sbp2_logical_unit *lu)
++static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
++ struct fw_device *device, struct sbp2_logical_unit *lu)
+ {
+ struct scatterlist *sg = scsi_sglist(orb->cmd);
+ int i, n;
+@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_c
+ * This is the concatenation of target port identifier and logical unit
+ * identifier as per SAM-2...SAM-4 annex A.
+ */
+-static ssize_t
+-sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
+- char *buf)
++static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct sbp2_logical_unit *lu;
+diff -Naurp linux-2.6-git/drivers/firewire/fw-topology.c firewire-git/drivers/firewire/fw-topology.c
+--- linux-2.6-git/drivers/firewire/fw-topology.c 2009-01-30 13:39:02.991771976 -0500
++++ firewire-git/drivers/firewire/fw-topology.c 2009-01-30 13:35:51.862647087 -0500
+@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struc
+ struct fw_node * node,
+ struct fw_node * parent);
+
+-static void
+-for_each_fw_node(struct fw_card *card, struct fw_node *root,
+- fw_node_callback_t callback)
++static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
++ fw_node_callback_t callback)
+ {
+ struct list_head list;
+ struct fw_node *node, *next, *child, *parent;
+@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, s
+ fw_node_put(node);
+ }
+
+-static void
+-report_lost_node(struct fw_card *card,
+- struct fw_node *node, struct fw_node *parent)
++static void report_lost_node(struct fw_card *card,
++ struct fw_node *node, struct fw_node *parent)
+ {
+ fw_node_event(card, node, FW_NODE_DESTROYED);
+ fw_node_put(node);
+@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
+ card->bm_retries = 0;
+ }
+
+-static void
+-report_found_node(struct fw_card *card,
+- struct fw_node *node, struct fw_node *parent)
++static void report_found_node(struct fw_card *card,
++ struct fw_node *node, struct fw_node *parent)
+ {
+ int b_path = (node->phy_speed == SCODE_BETA);
+
+@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *no
+ * found, lost or updated. Update the nodes in the card topology tree
+ * as we go.
+ */
+-static void
+-update_tree(struct fw_card *card, struct fw_node *root)
++static void update_tree(struct fw_card *card, struct fw_node *root)
+ {
+ struct list_head list0, list1;
+ struct fw_node *node0, *node1, *next1;
+@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct
+ }
+ }
+
+-static void
+-update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
++static void update_topology_map(struct fw_card *card,
++ u32 *self_ids, int self_id_count)
+ {
+ int node_count;
+
+@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card
+ fw_compute_block_crc(card->topology_map);
+ }
+
+-void
+-fw_core_handle_bus_reset(struct fw_card *card,
+- int node_id, int generation,
+- int self_id_count, u32 * self_ids)
++void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
++ int self_id_count, u32 *self_ids)
+ {
+ struct fw_node *local_node;
+ unsigned long flags;
+diff -Naurp linux-2.6-git/drivers/firewire/fw-topology.h firewire-git/drivers/firewire/fw-topology.h
+--- linux-2.6-git/drivers/firewire/fw-topology.h 2008-11-04 11:18:33.000000000 -0500
++++ firewire-git/drivers/firewire/fw-topology.h 2009-01-30 13:35:51.862647087 -0500
+@@ -19,6 +19,11 @@
+ #ifndef __fw_topology_h
+ #define __fw_topology_h
+
++#include <linux/list.h>
++#include <linux/slab.h>
++
++#include <asm/atomic.h>
++
+ enum {
+ FW_NODE_CREATED,
+ FW_NODE_UPDATED,
+@@ -51,26 +56,22 @@ struct fw_node {
+ struct fw_node *ports[0];
+ };
+
+-static inline struct fw_node *
+-fw_node_get(struct fw_node *node)
++static inline struct fw_node *fw_node_get(struct fw_node *node)
+ {
+ atomic_inc(&node->ref_count);
+
+ return node;
+ }
+
+-static inline void
+-fw_node_put(struct fw_node *node)
++static inline void fw_node_put(struct fw_node *node)
+ {
+ if (atomic_dec_and_test(&node->ref_count))
+ kfree(node);
+ }
+
+-void
+-fw_destroy_nodes(struct fw_card *card);
+-
+-int
+-fw_compute_block_crc(u32 *block);
++struct fw_card;
++void fw_destroy_nodes(struct fw_card *card);
+
++int fw_compute_block_crc(u32 *block);
+
+ #endif /* __fw_topology_h */
+diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.c firewire-git/drivers/firewire/fw-transaction.c
+--- linux-2.6-git/drivers/firewire/fw-transaction.c 2009-01-30 13:39:02.991771976 -0500
++++ firewire-git/drivers/firewire/fw-transaction.c 2009-01-30 13:35:51.862647087 -0500
+@@ -64,10 +64,9 @@
+ #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
+ #define PHY_IDENTIFIER(id) ((id) << 30)
+
+-static int
+-close_transaction(struct fw_transaction *transaction,
+- struct fw_card *card, int rcode,
+- u32 *payload, size_t length)
++static int close_transaction(struct fw_transaction *transaction,
++ struct fw_card *card, int rcode,
++ u32 *payload, size_t length)
+ {
+ struct fw_transaction *t;
+ unsigned long flags;
+@@ -94,9 +93,8 @@ close_transaction(struct fw_transaction
+ * Only valid for transactions that are potentially pending (ie have
+ * been sent).
+ */
+-int
+-fw_cancel_transaction(struct fw_card *card,
+- struct fw_transaction *transaction)
++int fw_cancel_transaction(struct fw_card *card,
++ struct fw_transaction *transaction)
+ {
+ /*
+ * Cancel the packet transmission if it's still queued. That
+@@ -116,9 +114,8 @@ fw_cancel_transaction(struct fw_card *ca
+ }
+ EXPORT_SYMBOL(fw_cancel_transaction);
+
+-static void
+-transmit_complete_callback(struct fw_packet *packet,
+- struct fw_card *card, int status)
++static void transmit_complete_callback(struct fw_packet *packet,
++ struct fw_card *card, int status)
+ {
+ struct fw_transaction *t =
+ container_of(packet, struct fw_transaction, packet);
+@@ -151,8 +148,7 @@ transmit_complete_callback(struct fw_pac
+ }
+ }
+
+-static void
+-fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
++static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+ int destination_id, int source_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length)
+ {
+@@ -247,12 +243,10 @@ fw_fill_request(struct fw_packet *packet
+ * @param callback_data pointer to arbitrary data, which will be
+ * passed to the callback
+ */
+-void
+-fw_send_request(struct fw_card *card, struct fw_transaction *t,
+- int tcode, int destination_id, int generation, int speed,
+- unsigned long long offset,
+- void *payload, size_t length,
+- fw_transaction_callback_t callback, void *callback_data)
++void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
++ int destination_id, int generation, int speed,
++ unsigned long long offset, void *payload, size_t length,
++ fw_transaction_callback_t callback, void *callback_data)
+ {
+ unsigned long flags;
+ int tlabel;
+@@ -322,8 +316,8 @@ static void transaction_callback(struct
+ * Returns the RCODE.
+ */
+ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+- int generation, int speed, unsigned long long offset,
+- void *data, size_t length)
++ int generation, int speed, unsigned long long offset,
++ void *data, size_t length)
+ {
+ struct transaction_callback_data d;
+ struct fw_transaction t;
+@@ -399,9 +393,8 @@ void fw_flush_transactions(struct fw_car
+ }
+ }
+
+-static struct fw_address_handler *
+-lookup_overlapping_address_handler(struct list_head *list,
+- unsigned long long offset, size_t length)
++static struct fw_address_handler *lookup_overlapping_address_handler(
++ struct list_head *list, unsigned long long offset, size_t length)
+ {
+ struct fw_address_handler *handler;
+
+@@ -414,9 +407,8 @@ lookup_overlapping_address_handler(struc
+ return NULL;
+ }
+
+-static struct fw_address_handler *
+-lookup_enclosing_address_handler(struct list_head *list,
+- unsigned long long offset, size_t length)
++static struct fw_address_handler *lookup_enclosing_address_handler(
++ struct list_head *list, unsigned long long offset, size_t length)
+ {
+ struct fw_address_handler *handler;
+
+@@ -449,36 +441,44 @@ const struct fw_address_region fw_unit_s
+ #endif /* 0 */
+
+ /**
+- * Allocate a range of addresses in the node space of the OHCI
+- * controller. When a request is received that falls within the
+- * specified address range, the specified callback is invoked. The
+- * parameters passed to the callback give the details of the
+- * particular request.
++ * fw_core_add_address_handler - register for incoming requests
++ * @handler: callback
++ * @region: region in the IEEE 1212 node space address range
++ *
++ * region->start, ->end, and handler->length have to be quadlet-aligned.
++ *
++ * When a request is received that falls within the specified address range,
++ * the specified callback is invoked. The parameters passed to the callback
++ * give the details of the particular request.
+ *
+ * Return value: 0 on success, non-zero otherwise.
+ * The start offset of the handler's address region is determined by
+ * fw_core_add_address_handler() and is returned in handler->offset.
+- * The offset is quadlet-aligned.
+ */
+-int
+-fw_core_add_address_handler(struct fw_address_handler *handler,
+- const struct fw_address_region *region)
++int fw_core_add_address_handler(struct fw_address_handler *handler,
++ const struct fw_address_region *region)
+ {
+ struct fw_address_handler *other;
+ unsigned long flags;
+ int ret = -EBUSY;
+
++ if (region->start & 0xffff000000000003ULL ||
++ region->end & 0xffff000000000003ULL ||
++ region->start >= region->end ||
++ handler->length & 3 ||
++ handler->length == 0)
++ return -EINVAL;
++
+ spin_lock_irqsave(&address_handler_lock, flags);
+
+- handler->offset = roundup(region->start, 4);
++ handler->offset = region->start;
+ while (handler->offset + handler->length <= region->end) {
+ other =
+ lookup_overlapping_address_handler(&address_handler_list,
+ handler->offset,
+ handler->length);
+ if (other != NULL) {
+- handler->offset =
+- roundup(other->offset + other->length, 4);
++ handler->offset += other->length;
+ } else {
+ list_add_tail(&handler->link, &address_handler_list);
+ ret = 0;
+@@ -493,12 +493,7 @@ fw_core_add_address_handler(struct fw_ad
+ EXPORT_SYMBOL(fw_core_add_address_handler);
+
+ /**
+- * Deallocate a range of addresses allocated with fw_allocate. This
+- * will call the associated callback one last time with a the special
+- * tcode TCODE_DEALLOCATE, to let the client destroy the registered
+- * callback data. For convenience, the callback parameters offset and
+- * length are set to the start and the length respectively for the
+- * deallocated region, payload is set to NULL.
++ * fw_core_remove_address_handler - unregister an address handler
+ */
+ void fw_core_remove_address_handler(struct fw_address_handler *handler)
+ {
+@@ -518,9 +513,8 @@ struct fw_request {
+ u32 data[0];
+ };
+
+-static void
+-free_response_callback(struct fw_packet *packet,
+- struct fw_card *card, int status)
++static void free_response_callback(struct fw_packet *packet,
++ struct fw_card *card, int status)
+ {
+ struct fw_request *request;
+
+@@ -528,9 +522,8 @@ free_response_callback(struct fw_packet
+ kfree(request);
+ }
+
+-void
+-fw_fill_response(struct fw_packet *response, u32 *request_header,
+- int rcode, void *payload, size_t length)
++void fw_fill_response(struct fw_packet *response, u32 *request_header,
++ int rcode, void *payload, size_t length)
+ {
+ int tcode, tlabel, extended_tcode, source, destination;
+
+@@ -588,8 +581,7 @@ fw_fill_response(struct fw_packet *respo
+ }
+ EXPORT_SYMBOL(fw_fill_response);
+
+-static struct fw_request *
+-allocate_request(struct fw_packet *p)
++static struct fw_request *allocate_request(struct fw_packet *p)
+ {
+ struct fw_request *request;
+ u32 *data, length;
+@@ -649,8 +641,8 @@ allocate_request(struct fw_packet *p)
+ return request;
+ }
+
+-void
+-fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
++void fw_send_response(struct fw_card *card,
++ struct fw_request *request, int rcode)
+ {
+ /* unified transaction or broadcast transaction: don't respond */
+ if (request->ack != ACK_PENDING ||
+@@ -670,8 +662,7 @@ fw_send_response(struct fw_card *card, s
+ }
+ EXPORT_SYMBOL(fw_send_response);
+
+-void
+-fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
++void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+ {
+ struct fw_address_handler *handler;
+ struct fw_request *request;
+@@ -719,8 +710,7 @@ fw_core_handle_request(struct fw_card *c
+ }
+ EXPORT_SYMBOL(fw_core_handle_request);
+
+-void
+-fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
++void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+ {
+ struct fw_transaction *t;
+ unsigned long flags;
+@@ -793,12 +783,10 @@ static const struct fw_address_region to
+ { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
+ .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
+
+-static void
+-handle_topology_map(struct fw_card *card, struct fw_request *request,
+- int tcode, int destination, int source,
+- int generation, int speed,
+- unsigned long long offset,
+- void *payload, size_t length, void *callback_data)
++static void handle_topology_map(struct fw_card *card, struct fw_request *request,
++ int tcode, int destination, int source, int generation,
++ int speed, unsigned long long offset,
++ void *payload, size_t length, void *callback_data)
+ {
+ int i, start, end;
+ __be32 *map;
+@@ -832,12 +820,10 @@ static const struct fw_address_region re
+ { .start = CSR_REGISTER_BASE,
+ .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
+
+-static void
+-handle_registers(struct fw_card *card, struct fw_request *request,
+- int tcode, int destination, int source,
+- int generation, int speed,
+- unsigned long long offset,
+- void *payload, size_t length, void *callback_data)
++static void handle_registers(struct fw_card *card, struct fw_request *request,
++ int tcode, int destination, int source, int generation,
++ int speed, unsigned long long offset,
++ void *payload, size_t length, void *callback_data)
+ {
+ int reg = offset & ~CSR_REGISTER_BASE;
+ unsigned long long bus_time;
+@@ -939,11 +925,11 @@ static struct fw_descriptor model_id_des
+
+ static int __init fw_core_init(void)
+ {
+- int retval;
++ int ret;
+
+- retval = bus_register(&fw_bus_type);
+- if (retval < 0)
+- return retval;
++ ret = bus_register(&fw_bus_type);
++ if (ret < 0)
++ return ret;
+
+ fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
+ if (fw_cdev_major < 0) {
+@@ -951,19 +937,10 @@ static int __init fw_core_init(void)
+ return fw_cdev_major;
+ }
+
+- retval = fw_core_add_address_handler(&topology_map,
+- &topology_map_region);
+- BUG_ON(retval < 0);
+-
+- retval = fw_core_add_address_handler(&registers,
+- &registers_region);
+- BUG_ON(retval < 0);
+-
+- /* Add the vendor textual descriptor. */
+- retval = fw_core_add_descriptor(&vendor_id_descriptor);
+- BUG_ON(retval < 0);
+- retval = fw_core_add_descriptor(&model_id_descriptor);
+- BUG_ON(retval < 0);
++ fw_core_add_address_handler(&topology_map, &topology_map_region);
++ fw_core_add_address_handler(&registers, &registers_region);
++ fw_core_add_descriptor(&vendor_id_descriptor);
++ fw_core_add_descriptor(&model_id_descriptor);
+
+ return 0;
+ }
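To tie the reworked kerneldoc and the new argument checks together, here is a minimal, hypothetical in-kernel usage sketch. The region, length, and all "example_" names are invented for illustration; the calls and struct field names are as I recall them from fw-transaction.h, and error handling is reduced to comments.

#include "fw-transaction.h"

/* Invented example region: quadlet aligned, inside the 48-bit address space. */
static const struct fw_address_region example_region = {
	.start	= 0x0000e0000000ULL,
	.end	= 0x0000e0010000ULL,
};

static void example_request(struct fw_card *card, struct fw_request *request,
			    int tcode, int destination, int source,
			    int generation, int speed,
			    unsigned long long offset,
			    void *payload, size_t length, void *callback_data)
{
	/* Acknowledge everything that lands in our window. */
	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler example_handler = {
	.length			= 0x100,	/* quadlet aligned, non-zero */
	.address_callback	= example_request,
};

static int example_register(void)
{
	/*
	 * Returns -EINVAL if the region or length violate the new checks,
	 * -EBUSY if no free spot exists; on success, example_handler.offset
	 * holds the start address that was actually assigned.
	 */
	return fw_core_add_address_handler(&example_handler, &example_region);
}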
+diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.h firewire-git/drivers/firewire/fw-transaction.h
+--- linux-2.6-git/drivers/firewire/fw-transaction.h 2009-01-30 13:39:02.992772636 -0500
++++ firewire-git/drivers/firewire/fw-transaction.h 2009-01-30 13:35:51.862647087 -0500
+@@ -82,14 +82,14 @@
+ #define CSR_SPEED_MAP 0x2000
+ #define CSR_SPEED_MAP_END 0x3000
+
++#define BANDWIDTH_AVAILABLE_INITIAL 4915
+ #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
+ #define BROADCAST_CHANNEL_VALID (1 << 30)
+
+ #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
+ #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
+
+-static inline void
+-fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
++static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
+ {
+ u32 *dst = _dst;
+ __be32 *src = _src;
+@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_s
+ dst[i] = be32_to_cpu(src[i]);
+ }
+
+-static inline void
+-fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
++static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
+ {
+ fw_memcpy_from_be32(_dst, _src, size);
+ }
+@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(str
+ struct fw_card *card, int status);
+
+ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
+- void *data,
+- size_t length,
++ void *data, size_t length,
+ void *callback_data);
+
+ /*
+@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(st
+ void *data, size_t length,
+ void *callback_data);
+
+-typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
+- int node_id, int generation,
+- u32 *self_ids,
+- int self_id_count,
+- void *callback_data);
+-
+ struct fw_packet {
+ int speed;
+ int generation;
+@@ -187,12 +179,6 @@ struct fw_transaction {
+ void *callback_data;
+ };
+
+-static inline struct fw_packet *
+-fw_packet(struct list_head *l)
+-{
+- return list_entry(l, struct fw_packet, link);
+-}
+-
+ struct fw_address_handler {
+ u64 offset;
+ size_t length;
+@@ -201,7 +187,6 @@ struct fw_address_handler {
+ struct list_head link;
+ };
+
+-
+ struct fw_address_region {
+ u64 start;
+ u64 end;
+@@ -315,10 +300,8 @@ struct fw_iso_packet {
+ struct fw_iso_context;
+
+ typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+- u32 cycle,
+- size_t header_length,
+- void *header,
+- void *data);
++ u32 cycle, size_t header_length,
++ void *header, void *data);
+
+ /*
+ * An iso buffer is just a set of pages mapped for DMA in the
+@@ -344,36 +327,25 @@ struct fw_iso_context {
+ void *callback_data;
+ };
+
+-int
+-fw_iso_buffer_init(struct fw_iso_buffer *buffer,
+- struct fw_card *card,
+- int page_count,
+- enum dma_data_direction direction);
+-int
+-fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+-void
+-fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+-
+-struct fw_iso_context *
+-fw_iso_context_create(struct fw_card *card, int type,
+- int channel, int speed, size_t header_size,
+- fw_iso_callback_t callback, void *callback_data);
+-
+-void
+-fw_iso_context_destroy(struct fw_iso_context *ctx);
+-
+-int
+-fw_iso_context_queue(struct fw_iso_context *ctx,
+- struct fw_iso_packet *packet,
+- struct fw_iso_buffer *buffer,
+- unsigned long payload);
+-
+-int
+-fw_iso_context_start(struct fw_iso_context *ctx,
+- int cycle, int sync, int tags);
++int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
++ int page_count, enum dma_data_direction direction);
++int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
++void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
++
++struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
++ int type, int channel, int speed, size_t header_size,
++ fw_iso_callback_t callback, void *callback_data);
++int fw_iso_context_queue(struct fw_iso_context *ctx,
++ struct fw_iso_packet *packet,
++ struct fw_iso_buffer *buffer,
++ unsigned long payload);
++int fw_iso_context_start(struct fw_iso_context *ctx,
++ int cycle, int sync, int tags);
++int fw_iso_context_stop(struct fw_iso_context *ctx);
++void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
+-int
+-fw_iso_context_stop(struct fw_iso_context *ctx);
++void fw_iso_resource_manage(struct fw_card *card, int generation,
++ u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+
+ struct fw_card_driver {
+ /*
+@@ -415,7 +387,7 @@ struct fw_card_driver {
+
+ struct fw_iso_context *
+ (*allocate_iso_context)(struct fw_card *card,
+- int type, size_t header_size);
++ int type, int channel, size_t header_size);
+ void (*free_iso_context)(struct fw_iso_context *ctx);
+
+ int (*start_iso)(struct fw_iso_context *ctx,
+@@ -429,24 +401,18 @@ struct fw_card_driver {
+ int (*stop_iso)(struct fw_iso_context *ctx);
+ };
+
+-int
+-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
++int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+
+-void
+-fw_send_request(struct fw_card *card, struct fw_transaction *t,
++void fw_send_request(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int destination_id, int generation, int speed,
+ unsigned long long offset, void *data, size_t length,
+ fw_transaction_callback_t callback, void *callback_data);
+-
+-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+- int generation, int speed, unsigned long long offset,
+- void *data, size_t length);
+-
+ int fw_cancel_transaction(struct fw_card *card,
+ struct fw_transaction *transaction);
+-
+ void fw_flush_transactions(struct fw_card *card);
+-
++int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
++ int generation, int speed, unsigned long long offset,
++ void *data, size_t length);
+ void fw_send_phy_config(struct fw_card *card,
+ int node_id, int generation, int gap_count);
+
+@@ -454,29 +420,18 @@ void fw_send_phy_config(struct fw_card *
+ * Called by the topology code to inform the device code of node
+ * activity; found, lost, or updated nodes.
+ */
+-void
+-fw_node_event(struct fw_card *card, struct fw_node *node, int event);
++void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+
+ /* API used by card level drivers */
+
+-void
+-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+- struct device *device);
+-int
+-fw_card_add(struct fw_card *card,
+- u32 max_receive, u32 link_speed, u64 guid);
+-
+-void
+-fw_core_remove_card(struct fw_card *card);
+-
+-void
+-fw_core_handle_bus_reset(struct fw_card *card,
+- int node_id, int generation,
+- int self_id_count, u32 *self_ids);
+-void
+-fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+-
+-void
+-fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
++void fw_card_initialize(struct fw_card *card,
++ const struct fw_card_driver *driver, struct device *device);
++int fw_card_add(struct fw_card *card,
++ u32 max_receive, u32 link_speed, u64 guid);
++void fw_core_remove_card(struct fw_card *card);
++void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
++ int generation, int self_id_count, u32 *self_ids);
++void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
++void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+
+ #endif /* __fw_transaction_h */
+--- linux-2.6-git/include/linux/firewire-cdev.h 2008-11-04 11:19:21.000000000 -0500
++++ firewire-git/include/linux/firewire-cdev.h 2009-01-30 13:35:54.327647015 -0500
+@@ -25,10 +25,12 @@
+ #include <linux/types.h>
+ #include <linux/firewire-constants.h>
+
+-#define FW_CDEV_EVENT_BUS_RESET 0x00
+-#define FW_CDEV_EVENT_RESPONSE 0x01
+-#define FW_CDEV_EVENT_REQUEST 0x02
+-#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
++#define FW_CDEV_EVENT_BUS_RESET 0x00
++#define FW_CDEV_EVENT_RESPONSE 0x01
++#define FW_CDEV_EVENT_REQUEST 0x02
++#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
++#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
++#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
+
+ /**
+ * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
+@@ -136,7 +138,24 @@ struct fw_cdev_event_request {
+ * This event is sent when the controller has completed an &fw_cdev_iso_packet
+ * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
+ * stripped of all packets up until and including the interrupt packet are
+- * returned in the @header field.
++ * returned in the @header field. The amount of header data per packet is as
++ * specified at iso context creation by &fw_cdev_create_iso_context.header_size.
++ *
++ * In version 1 of this ABI, header data consisted of the 1394 isochronous
++ * packet header, followed by quadlets from the packet payload if
++ * &fw_cdev_create_iso_context.header_size > 4.
++ *
++ * In version 2 of this ABI, header data consist of the 1394 isochronous
++ * packet header, followed by a timestamp quadlet if
++ * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
++ * packet payload if &fw_cdev_create_iso_context.header_size > 8.
++ *
++ * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
++ *
++ * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
++ * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
++ * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
++ * order.
+ */
+ struct fw_cdev_event_iso_interrupt {
+ __u64 closure;
+@@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt {
+ };
+
+ /**
++ * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
++ * @closure: See &fw_cdev_event_common;
++ * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
++ * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
++ * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
++ * @handle: Reference by which an allocated resource can be deallocated
++ * @channel: Isochronous channel which was (de)allocated, if any
++ * @bandwidth: Bandwidth allocation units which were (de)allocated, if any
++ *
++ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
++ * resource was allocated at the IRM. The client has to check @channel and
++ * @bandwidth for whether the allocation actually succeeded.
++ *
++ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
++ * resource was deallocated at the IRM. It is also sent when automatic
++ * reallocation after a bus reset failed.
++ *
++ * @channel is <0 if no channel was (de)allocated or if reallocation failed.
++ * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
++ */
++struct fw_cdev_event_iso_resource {
++ __u64 closure;
++ __u32 type;
++ __u32 handle;
++ __s32 channel;
++ __s32 bandwidth;
++};
++
++/**
+ * union fw_cdev_event - Convenience union of fw_cdev_event_ types
+ * @common: Valid for all types
+ * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
+ * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
+ * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
+ * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
++ * @iso_resource: Valid if @common.type ==
++ * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
++ * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
+ *
+ * Convenience union for userspace use. Events could be read(2) into an
+ * appropriately aligned char buffer and then cast to this union for further
+@@ -163,13 +214,15 @@ struct fw_cdev_event_iso_interrupt {
+ * not fit will be discarded so that the next read(2) will return a new event.
+ */
+ union fw_cdev_event {
+- struct fw_cdev_event_common common;
+- struct fw_cdev_event_bus_reset bus_reset;
+- struct fw_cdev_event_response response;
+- struct fw_cdev_event_request request;
+- struct fw_cdev_event_iso_interrupt iso_interrupt;
++ struct fw_cdev_event_common common;
++ struct fw_cdev_event_bus_reset bus_reset;
++ struct fw_cdev_event_response response;
++ struct fw_cdev_event_request request;
++ struct fw_cdev_event_iso_interrupt iso_interrupt;
++ struct fw_cdev_event_iso_resource iso_resource;
+ };
+
++/* available since kernel version 2.6.22 */
+ #define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
+ #define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
+ #define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
+@@ -178,18 +231,29 @@ union fw_cdev_event {
+ #define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
+ #define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
+ #define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
+-
+ #define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
+ #define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
+ #define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
+ #define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
++
++/* available since kernel version 2.6.24 */
+ #define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
+
+-/* FW_CDEV_VERSION History
+- *
+- * 1 Feb 18, 2007: Initial version.
++/* available since kernel version 2.6.30 */
++#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
++#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate)
++#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
++#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
++#define FW_CDEV_IOC_GET_SPEED _IOR('#', 0x11, struct fw_cdev_get_speed)
++#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
++
++/*
++ * FW_CDEV_VERSION History
++ * 1 (2.6.22) - initial version
++ * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
++ * &fw_cdev_create_iso_context.header_size is 8 or more
+ */
+-#define FW_CDEV_VERSION 1
++#define FW_CDEV_VERSION 2
+
+ /**
+ * struct fw_cdev_get_info - General purpose information ioctl
+@@ -201,7 +265,7 @@ union fw_cdev_event {
+ * case, @rom_length is updated with the actual length of the
+ * configuration ROM.
+ * @rom: If non-zero, address of a buffer to be filled by a copy of the
+- * local node's configuration ROM
++ * device's configuration ROM
+ * @bus_reset: If non-zero, address of a buffer to be filled by a
+ * &struct fw_cdev_event_bus_reset with the current state
+ * of the bus. This does not cause a bus reset to happen.
+@@ -229,7 +293,7 @@ struct fw_cdev_get_info {
+ * Send a request to the device. This ioctl implements all outgoing requests.
+ * Both quadlet and block request specify the payload as a pointer to the data
+ * in the @data field. Once the transaction completes, the kernel writes an
+- * &fw_cdev_event_request event back. The @closure field is passed back to
++ * &fw_cdev_event_response event back. The @closure field is passed back to
+ * user space in the response event.
+ */
+ struct fw_cdev_send_request {
+@@ -284,9 +348,9 @@ struct fw_cdev_allocate {
+ };
+
+ /**
+- * struct fw_cdev_deallocate - Free an address range allocation
+- * @handle: Handle to the address range, as returned by the kernel when the
+- * range was allocated
++ * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
++ * @handle: Handle to the address range or iso resource, as returned by the
++ * kernel when the range or resource was allocated
+ */
+ struct fw_cdev_deallocate {
+ __u32 handle;
+@@ -370,6 +434,9 @@ struct fw_cdev_remove_descriptor {
+ *
+ * If a context was successfully created, the kernel writes back a handle to the
+ * context, which must be passed in for subsequent operations on that context.
++ *
++ * Note that the effect of a @header_size > 4 depends on
++ * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
+ */
+ struct fw_cdev_create_iso_context {
+ __u32 type;
+@@ -473,10 +540,73 @@ struct fw_cdev_stop_iso {
+ * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
+ * and also the system clock. This allows the receive time of an isochronous
+ * packet to be expressed as a system time with microsecond accuracy.
++ *
++ * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
++ * 12 bits cycleOffset, in host byte order.
+ */
+ struct fw_cdev_get_cycle_timer {
+ __u64 local_time;
+ __u32 cycle_timer;
+ };
+
++/**
++ * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
++ * @closure: Passed back to userspace in corresponding iso resource events
++ * @channels: Isochronous channels of which one is to be (de)allocated
++ * @bandwidth: Isochronous bandwidth units to be (de)allocated
++ * @handle: Handle to the allocation, written by the kernel (only valid in
++ * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
++ *
++ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
++ * isochronous channel and/or of isochronous bandwidth at the isochronous
++ * resource manager (IRM). Only one of the channels specified in @channels is
++ * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after
++ * communication with the IRM, indicating success or failure in the event data.
++ * The kernel will automatically reallocate the resources after bus resets.
++ * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
++ * will be sent. The kernel will also automatically deallocate the resources
++ * when the file descriptor is closed.
++ *
++ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
++ * deallocation of resources which were allocated as described above.
++ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
++ *
++ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
++ * without automatic re- or deallocation.
++ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
++ * indicating success or failure in its data.
++ *
++ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
++ * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
++ * instead of allocated.
++ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
++ *
++ * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
++ * for the lifetime of the fd or handle.
++ * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
++ * for the duration of a bus generation.
++ *
++ * @channels is a host-endian bitfield with the least significant bit
++ * representing channel 0 and the most significant bit representing channel 63:
++ * 1ULL << c for each channel c that is a candidate for (de)allocation.
++ *
++ * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
++ * one quadlet of data (payload or header data) at speed S1600.
++ */
++struct fw_cdev_allocate_iso_resource {
++ __u64 closure;
++ __u64 channels;
++ __u32 bandwidth;
++ __u32 handle;
++};
++
++/**
++ * struct fw_cdev_get_speed - Query maximum speed to or from this device
++ * @max_speed: Speed code; minimum of the device's link speed, the local node's
++ * link speed, and all PHY port speeds between the two links
++ */
++struct fw_cdev_get_speed {
++ __u32 max_speed;
++};
++
+ #endif /* _LINUX_FIREWIRE_CDEV_H */
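A minimal userspace sketch of the new resource-management ABI documented above, assuming a /dev/fw0 device node and a kernel exposing FW_CDEV_VERSION 2; error handling, the usual FW_CDEV_IOC_GET_INFO handshake, and the possibility of unrelated events arriving first are all glossed over.

/* build: cc -o iso-alloc iso-alloc.c (headers from a v2-capable kernel) */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_allocate_iso_resource req;
	union fw_cdev_event event;
	int fd;

	fd = open("/dev/fw0", O_RDWR);		/* device node name is an assumption */
	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.channels  = 1ULL << 5 | 1ULL << 6;	/* candidates: channels 5 and 6 */
	req.bandwidth = 0;			/* channel only, no bandwidth */

	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0)
		return 1;

	/* The outcome arrives asynchronously as an event on read(2). */
	if (read(fd, &event, sizeof(event)) > 0 &&
	    event.common.type == FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED)
		printf("channel %d, bandwidth %d, handle %u\n",
		       event.iso_resource.channel,
		       event.iso_resource.bandwidth,
		       event.iso_resource.handle);

	close(fd);
	return 0;
}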
diff --git a/freed-ora/current/F-12/linux-2.6-g5-therm-shutdown.patch b/freed-ora/current/F-12/linux-2.6-g5-therm-shutdown.patch
new file mode 100644
index 000000000..1471ef1bb
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-g5-therm-shutdown.patch
@@ -0,0 +1,70 @@
+--- linux-2.6.15/drivers/macintosh/therm_pm72.c.orig 2006-04-02 21:34:48.000000000 +0100
++++ linux-2.6.15/drivers/macintosh/therm_pm72.c 2006-04-02 22:33:27.000000000 +0100
+@@ -924,10 +925,16 @@ static void do_monitor_cpu_combined(void
+ printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
+ temp_combi >> 16);
+ state0->overtemp += CPU_MAX_OVERTEMP / 4;
+- } else if (temp_combi > (state0->mpu.tmax << 16))
++ } else if (temp_combi > (state0->mpu.tmax << 16)) {
+ state0->overtemp++;
+- else
++ printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
++ temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
++ } else {
++ if (state0->overtemp)
++ printk(KERN_WARNING "Temperature back down to %d\n",
++ temp_combi >> 16);
+ state0->overtemp = 0;
++ }
+ if (state0->overtemp >= CPU_MAX_OVERTEMP)
+ critical_state = 1;
+ if (state0->overtemp > 0) {
+@@ -999,10 +1015,16 @@ static void do_monitor_cpu_split(struct
+ " (%d) !\n",
+ state->index, temp >> 16);
+ state->overtemp += CPU_MAX_OVERTEMP / 4;
+- } else if (temp > (state->mpu.tmax << 16))
++ } else if (temp > (state->mpu.tmax << 16)) {
+ state->overtemp++;
+- else
++ printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
++ state->index, temp >> 16, state->mpu.tmax, state->overtemp);
++ } else {
++ if (state->overtemp)
++ printk(KERN_WARNING "CPU %d temperature back down to %d\n",
++ state->index, temp >> 16);
+ state->overtemp = 0;
++ }
+ if (state->overtemp >= CPU_MAX_OVERTEMP)
+ critical_state = 1;
+ if (state->overtemp > 0) {
+@@ -1061,10 +1097,16 @@ static void do_monitor_cpu_rack(struct c
+ " (%d) !\n",
+ state->index, temp >> 16);
+ state->overtemp = CPU_MAX_OVERTEMP / 4;
+- } else if (temp > (state->mpu.tmax << 16))
++ } else if (temp > (state->mpu.tmax << 16)) {
+ state->overtemp++;
+- else
++ printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
++ state->index, temp >> 16, state->mpu.tmax, state->overtemp);
++ } else {
++ if (state->overtemp)
++ printk(KERN_WARNING "CPU %d temperature back down to %d\n",
++ state->index, temp >> 16);
+ state->overtemp = 0;
++ }
+ if (state->overtemp >= CPU_MAX_OVERTEMP)
+ critical_state = 1;
+ if (state->overtemp > 0) {
+--- linux-2.6.15/drivers/macintosh/therm_pm72.h~ 2006-01-03 03:21:10.000000000 +0000
++++ linux-2.6.15/drivers/macintosh/therm_pm72.h 2006-04-02 22:25:58.000000000 +0100
+@@ -243,7 +243,7 @@ struct dimm_pid_state
+ #define CPU_TEMP_HISTORY_SIZE 2
+ #define CPU_POWER_HISTORY_SIZE 10
+ #define CPU_PID_INTERVAL 1
+-#define CPU_MAX_OVERTEMP 30
++#define CPU_MAX_OVERTEMP 90
+
+ #define CPUA_PUMP_RPM_INDEX 7
+ #define CPUB_PUMP_RPM_INDEX 8
diff --git a/freed-ora/current/F-12/linux-2.6-hotfixes.patch b/freed-ora/current/F-12/linux-2.6-hotfixes.patch
new file mode 100644
index 000000000..257cc5642
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-hotfixes.patch
@@ -0,0 +1 @@
+foo
diff --git a/freed-ora/current/F-12/linux-2.6-imac-transparent-bridge.patch b/freed-ora/current/F-12/linux-2.6-imac-transparent-bridge.patch
new file mode 100644
index 000000000..7fd536026
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-imac-transparent-bridge.patch
@@ -0,0 +1,15 @@
+--- linux/arch/powerpc/platforms/powermac/pci.c~ 2008-03-22 19:08:07.000000000 +0000
++++ linux/arch/powerpc/platforms/powermac/pci.c 2008-03-23 09:10:46.000000000 +0000
+@@ -1271,6 +1271,12 @@ void pmac_pci_fixup_pciata(struct pci_de
+ }
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
++#else /* CONFIG_PPC64 */
++static void __devinit imac_transparent_bridge(struct pci_dev *dev)
++{
++ dev->transparent = 1;
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x005b, imac_transparent_bridge);
+ #endif /* CONFIG_PPC32 */
+
+ /*
diff --git a/freed-ora/current/F-12/linux-2.6-input-fix-toshiba-hotkeys.patch b/freed-ora/current/F-12/linux-2.6-input-fix-toshiba-hotkeys.patch
new file mode 100644
index 000000000..74558e69f
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-input-fix-toshiba-hotkeys.patch
@@ -0,0 +1,278 @@
+commit 61a2aa30877a6e2be1d3fb3a71385e1f741819d7
+Author: Matthew Garrett <mjg@redhat.com>
+Date: Fri Mar 6 00:25:45 2009 +0000
+
+ toshiba-acpi: Add support for hotkey notifications
+
+ Calling the ENAB method on Toshiba laptops results in notifications being
+ sent when laptop hotkeys are pressed. This patch simply calls that method
+ and sets up an input device if it's successful.
+
+ Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 40e60fc..604f9fa 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -46,6 +46,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/rfkill.h>
+ #include <linux/input-polldev.h>
++#include <linux/input.h>
+
+ #include <asm/uaccess.h>
+
+@@ -62,9 +63,10 @@ MODULE_LICENSE("GPL");
+
+ /* Toshiba ACPI method paths */
+ #define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM"
+-#define METHOD_HCI_1 "\\_SB_.VALD.GHCI"
+-#define METHOD_HCI_2 "\\_SB_.VALZ.GHCI"
++#define TOSH_INTERFACE_1 "\\_SB_.VALD"
++#define TOSH_INTERFACE_2 "\\_SB_.VALZ"
+ #define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
++#define GHCI_METHOD ".GHCI"
+
+ /* Toshiba HCI interface definitions
+ *
+@@ -116,6 +118,36 @@ static const struct acpi_device_id toshiba_device_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
+
++struct key_entry {
++ char type;
++ u16 code;
++ u16 keycode;
++};
++
++enum {KE_KEY, KE_END};
++
++static struct key_entry toshiba_acpi_keymap[] = {
++ {KE_KEY, 0x101, KEY_MUTE},
++ {KE_KEY, 0x13b, KEY_COFFEE},
++ {KE_KEY, 0x13c, KEY_BATTERY},
++ {KE_KEY, 0x13d, KEY_SLEEP},
++ {KE_KEY, 0x13e, KEY_SUSPEND},
++ {KE_KEY, 0x13f, KEY_SWITCHVIDEOMODE},
++ {KE_KEY, 0x140, KEY_BRIGHTNESSDOWN},
++ {KE_KEY, 0x141, KEY_BRIGHTNESSUP},
++ {KE_KEY, 0x142, KEY_WLAN},
++ {KE_KEY, 0x143, KEY_PROG1},
++ {KE_KEY, 0xb05, KEY_PROG2},
++ {KE_KEY, 0xb06, KEY_WWW},
++ {KE_KEY, 0xb07, KEY_MAIL},
++ {KE_KEY, 0xb30, KEY_STOP},
++ {KE_KEY, 0xb31, KEY_PREVIOUSSONG},
++ {KE_KEY, 0xb32, KEY_NEXTSONG},
++ {KE_KEY, 0xb33, KEY_PLAYPAUSE},
++ {KE_KEY, 0xb5a, KEY_MEDIA},
++ {KE_END, 0, 0},
++};
++
+ /* utility
+ */
+
+@@ -252,6 +284,8 @@ struct toshiba_acpi_dev {
+ struct platform_device *p_dev;
+ struct rfkill *rfk_dev;
+ struct input_polled_dev *poll_dev;
++ struct input_dev *hotkey_dev;
++ acpi_handle handle;
+
+ const char *bt_name;
+ const char *rfk_name;
+@@ -702,6 +736,154 @@ static struct backlight_ops toshiba_backlight_data = {
+ .update_status = set_lcd_status,
+ };
+
++static struct key_entry *toshiba_acpi_get_entry_by_scancode(int code)
++{
++ struct key_entry *key;
++
++ for (key = toshiba_acpi_keymap; key->type != KE_END; key++)
++ if (code == key->code)
++ return key;
++
++ return NULL;
++}
++
++static struct key_entry *toshiba_acpi_get_entry_by_keycode(int code)
++{
++ struct key_entry *key;
++
++ for (key = toshiba_acpi_keymap; key->type != KE_END; key++)
++ if (code == key->keycode && key->type == KE_KEY)
++ return key;
++
++ return NULL;
++}
++
++static int toshiba_acpi_getkeycode(struct input_dev *dev, int scancode,
++ int *keycode)
++{
++ struct key_entry *key = toshiba_acpi_get_entry_by_scancode(scancode);
++
++ if (key && key->type == KE_KEY) {
++ *keycode = key->keycode;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++static int toshiba_acpi_setkeycode(struct input_dev *dev, int scancode,
++ int keycode)
++{
++ struct key_entry *key;
++ int old_keycode;
++
++ if (keycode < 0 || keycode > KEY_MAX)
++ return -EINVAL;
++
++ key = toshiba_acpi_get_entry_by_scancode(scancode);
++ if (key && key->type == KE_KEY) {
++ old_keycode = key->keycode;
++ key->keycode = keycode;
++ set_bit(keycode, dev->keybit);
++ if (!toshiba_acpi_get_entry_by_keycode(old_keycode))
++ clear_bit(old_keycode, dev->keybit);
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *data)
++{
++ u32 hci_result, value;
++ struct key_entry *key;
++
++ if (event != 0x80)
++ return;
++ do {
++ hci_read1(HCI_SYSTEM_EVENT, &value, &hci_result);
++ if (hci_result == HCI_SUCCESS) {
++ if (value == 0x100)
++ continue;
++ else if (value & 0x80) {
++ key = toshiba_acpi_get_entry_by_scancode
++ (value & ~0x80);
++ if (!key) {
++ printk(MY_INFO "Unknown key %x\n",
++ value & ~0x80);
++ continue;
++ }
++ input_report_key(toshiba_acpi.hotkey_dev,
++ key->keycode, 1);
++ input_sync(toshiba_acpi.hotkey_dev);
++ input_report_key(toshiba_acpi.hotkey_dev,
++ key->keycode, 0);
++ input_sync(toshiba_acpi.hotkey_dev);
++ }
++ } else if (hci_result == HCI_NOT_SUPPORTED) {
++ /* This is a workaround for an unresolved issue on
++ * some machines where system events sporadically
++ * become disabled. */
++ hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
++ printk(MY_NOTICE "Re-enabled hotkeys\n");
++ }
++ } while (hci_result != HCI_EMPTY);
++}
++
++static int toshiba_acpi_setup_keyboard(char *device)
++{
++ acpi_status status;
++ acpi_handle handle;
++ int result;
++ const struct key_entry *key;
++
++ status = acpi_get_handle(NULL, device, &handle);
++ if (ACPI_FAILURE(status)) {
++ printk(MY_INFO "Unable to get notification device\n");
++ return -ENODEV;
++ }
++
++ toshiba_acpi.handle = handle;
++
++ status = acpi_evaluate_object(handle, "ENAB", NULL, NULL);
++ if (ACPI_FAILURE(status)) {
++ printk(MY_INFO "Unable to enable hotkeys\n");
++ return -ENODEV;
++ }
++
++ status = acpi_install_notify_handler (handle, ACPI_DEVICE_NOTIFY,
++ toshiba_acpi_notify, NULL);
++ if (ACPI_FAILURE(status)) {
++ printk(MY_INFO "Unable to install hotkey notification\n");
++ return -ENODEV;
++ }
++
++ toshiba_acpi.hotkey_dev = input_allocate_device();
++ if (!toshiba_acpi.hotkey_dev) {
++ printk(MY_INFO "Unable to register input device\n");
++ return -ENOMEM;
++ }
++
++ toshiba_acpi.hotkey_dev->name = "Toshiba input device";
++ toshiba_acpi.hotkey_dev->phys = device;
++ toshiba_acpi.hotkey_dev->id.bustype = BUS_HOST;
++ toshiba_acpi.hotkey_dev->getkeycode = toshiba_acpi_getkeycode;
++ toshiba_acpi.hotkey_dev->setkeycode = toshiba_acpi_setkeycode;
++
++ for (key = toshiba_acpi_keymap; key->type != KE_END; key++) {
++ set_bit(EV_KEY, toshiba_acpi.hotkey_dev->evbit);
++ set_bit(key->keycode, toshiba_acpi.hotkey_dev->keybit);
++ }
++
++ result = input_register_device(toshiba_acpi.hotkey_dev);
++ if (result) {
++ printk(MY_INFO "Unable to register input device\n");
++ return result;
++ }
++
++ return 0;
++}
++
+ static void toshiba_acpi_exit(void)
+ {
+ if (toshiba_acpi.poll_dev) {
+@@ -709,12 +891,18 @@ static void toshiba_acpi_exit(void)
+ input_free_polled_device(toshiba_acpi.poll_dev);
+ }
+
++ if (toshiba_acpi.hotkey_dev)
++ input_unregister_device(toshiba_acpi.hotkey_dev);
++
+ if (toshiba_acpi.rfk_dev)
+ rfkill_unregister(toshiba_acpi.rfk_dev);
+
+ if (toshiba_backlight_device)
+ backlight_device_unregister(toshiba_backlight_device);
+
++ acpi_remove_notify_handler(toshiba_acpi.handle, ACPI_DEVICE_NOTIFY,
++ toshiba_acpi_notify);
++
+ remove_device();
+
+ if (toshiba_proc_dir)
+@@ -738,11 +926,15 @@ static int __init toshiba_acpi_init(void)
+ return -ENODEV;
+
+ /* simple device detection: look for HCI method */
+- if (is_valid_acpi_path(METHOD_HCI_1))
+- method_hci = METHOD_HCI_1;
+- else if (is_valid_acpi_path(METHOD_HCI_2))
+- method_hci = METHOD_HCI_2;
+- else
++ if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) {
++ method_hci = TOSH_INTERFACE_1 GHCI_METHOD;
++ if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1))
++ printk(MY_INFO "Unable to activate hotkeys\n");
++ } else if (is_valid_acpi_path(TOSH_INTERFACE_2 GHCI_METHOD)) {
++ method_hci = TOSH_INTERFACE_2 GHCI_METHOD;
++ if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2))
++ printk(MY_INFO "Unable to activate hotkeys\n");
++ } else
+ return -ENODEV;
+
+ printk(MY_INFO "Toshiba Laptop ACPI Extras version %s\n",
diff --git a/freed-ora/current/F-12/linux-2.6-input-hid-quirk-egalax.patch b/freed-ora/current/F-12/linux-2.6-input-hid-quirk-egalax.patch
new file mode 100644
index 000000000..db38685f0
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-input-hid-quirk-egalax.patch
@@ -0,0 +1,41 @@
+Date: Mon, 1 Feb 2010 12:53:47 +1300
+From: Peter Hutterer <peter.hutterer@redhat.com>
+To: Dave Airlie <airlied@redhat.com>
+Subject: [PATCH] HID: add multi-input quirk for eGalax Touchcontroller
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Tested-by: Alfred Broda <guaranga@wp.pl>
+---
+ drivers/hid/hid-ids.h | 3 +++
+ drivers/hid/usbhid/hid-quirks.c | 1 +
+ 2 files changed, 4 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index f5144b8..2e698a2 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -158,6 +158,9 @@
+
+ #define USB_VENDOR_ID_DRAGONRISE 0x0079
+
++#define USB_VENDOR_ID_EGALAX 0x0EEF
++#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
++
+ #define USB_VENDOR_ID_ELO 0x04E7
+ #define USB_DEVICE_ID_ELO_TS2700 0x0020
+
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index e987562..dc27d74 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -32,6 +32,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
++ { USB_VENDOR_ID_EGALAX, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+--
+1.6.5.2
+
diff --git a/freed-ora/current/F-12/linux-2.6-input-kill-stupid-messages.patch b/freed-ora/current/F-12/linux-2.6-input-kill-stupid-messages.patch
new file mode 100644
index 000000000..2e6314694
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-input-kill-stupid-messages.patch
@@ -0,0 +1,17 @@
+--- linux-2.6.21.noarch/drivers/input/keyboard/atkbd.c~ 2007-07-06 10:51:04.000000000 -0400
++++ linux-2.6.21.noarch/drivers/input/keyboard/atkbd.c 2007-07-06 10:51:33.000000000 -0400
+@@ -409,10 +409,14 @@ static irqreturn_t atkbd_interrupt(struc
+ goto out;
+ case ATKBD_RET_ACK:
+ case ATKBD_RET_NAK:
++#if 0
++ /* Quite a few key switchers and other tools trigger this and it confuses
++ people who can do nothing about it */
+ if (printk_ratelimit())
+ printk(KERN_WARNING "atkbd.c: Spurious %s on %s. "
+ "Some program might be trying access hardware directly.\n",
+ data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
++#endif
+ goto out;
+ case ATKBD_RET_ERR:
+ atkbd->err_count++;
diff --git a/freed-ora/current/F-12/linux-2.6-ksm-kvm.patch b/freed-ora/current/F-12/linux-2.6-ksm-kvm.patch
new file mode 100644
index 000000000..9fac2a8dc
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-ksm-kvm.patch
@@ -0,0 +1,314 @@
+When using mmu notifiers, we are allowed to drop the page count
+reference taken by get_user_pages on a page that is mapped
+inside the shadow page tables.
+
+This is needed so that the page count can be balanced against the map
+count when checking a page.
+
+(Right now kvm increases the page count but not the map count when
+mapping a page into a shadow page table entry, so comparing the
+page count against the map count gives no reliable result.)
+
+Add an SPTE_HOST_WRITEABLE flag to note that the host physical page
+the spte points to is write protected, and therefore its access cannot
+be changed to writable unless we run get_user_pages(write = 1).
+
+(this is needed for change_pte support in kvm)
+
+Support for change_pte mmu notifiers is needed by kvm if it wants ksm
+to directly map pages into its shadow page tables.
+
+Signed-off-by: Izik Eidus <ieidus@redhat.com>
+Signed-off-by: Justin M. Forbes <jforbes@redhat.com>
+---
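As a rough illustration of the balance the changelog talks about, consider the simplified check below. It is a sketch with an invented helper name, not code from KSM or KVM: a page is only fully accounted for when every reference is explained by a mapping plus the one the checker itself holds, and an extra get_user_pages() reference from the shadow-MMU path breaks that.

#include <linux/mm.h>

/* Invented helper, loosely modelled on the kind of check KSM performs. */
static bool page_references_balanced(struct page *page)
{
	/*
	 * page_mapcount() counts page-table mappings, page_count() counts
	 * all references.  A reference pinned by get_user_pages() from the
	 * KVM shadow-MMU path shows up only in page_count(), so this
	 * comparison can never succeed for such a page.
	 */
	return page_mapcount(page) + 1 == page_count(page);
}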
+--- linux-2.6.30.x86_64/arch/x86/include/asm/kvm_host.h 2009-08-20 10:37:37.784886414 -0500
++++ linux-2.6.30.x86_64.kvm/arch/x86/include/asm/kvm_host.h 2009-08-20 10:39:33.742641558 -0500
+@@ -796,5 +796,6 @@ asmlinkage void kvm_handle_fault_on_rebo
+ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+ int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+ int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
++void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+ #endif /* _ASM_X86_KVM_HOST_H */
+--- linux-2.6.30.x86_64/arch/x86/kvm/mmu.c 2009-08-20 10:37:37.964887039 -0500
++++ linux-2.6.30.x86_64.kvm/arch/x86/kvm/mmu.c 2009-08-20 10:41:15.231638028 -0500
+@@ -139,6 +139,8 @@ module_param(oos_shadow, bool, 0644);
+ #define ACC_USER_MASK PT_USER_MASK
+ #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+
++#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
++
+ #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+
+ struct kvm_rmap_desc {
+@@ -254,6 +256,11 @@ static pfn_t spte_to_pfn(u64 pte)
+ return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+ }
+
++static pte_t ptep_val(pte_t *ptep)
++{
++ return *ptep;
++}
++
+ static gfn_t pse36_gfn_delta(u32 gpte)
+ {
+ int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
+@@ -573,9 +580,7 @@ static void rmap_remove(struct kvm *kvm,
+ if (*spte & shadow_accessed_mask)
+ kvm_set_pfn_accessed(pfn);
+ if (is_writeble_pte(*spte))
+- kvm_release_pfn_dirty(pfn);
+- else
+- kvm_release_pfn_clean(pfn);
++ kvm_set_pfn_dirty(pfn);
+ rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
+ if (!*rmapp) {
+ printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
+@@ -684,7 +689,8 @@ static int rmap_write_protect(struct kvm
+ return write_protected;
+ }
+
+-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
++static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
++ unsigned long data)
+ {
+ u64 *spte;
+ int need_tlb_flush = 0;
+@@ -699,8 +705,48 @@ static int kvm_unmap_rmapp(struct kvm *k
+ return need_tlb_flush;
+ }
+
++static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
++ unsigned long data)
++{
++ int need_flush = 0;
++ u64 *spte, new_spte;
++ pte_t *ptep = (pte_t *)data;
++ pfn_t new_pfn;
++
++ new_pfn = pte_pfn(ptep_val(ptep));
++ spte = rmap_next(kvm, rmapp, NULL);
++ while (spte) {
++ BUG_ON(!is_shadow_present_pte(*spte));
++ rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
++ need_flush = 1;
++ if (pte_write(ptep_val(ptep))) {
++ rmap_remove(kvm, spte);
++ set_shadow_pte(spte, shadow_trap_nonpresent_pte);
++ spte = rmap_next(kvm, rmapp, NULL);
++ } else {
++ new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
++ new_spte |= new_pfn << PAGE_SHIFT;
++
++ if (!pte_write(ptep_val(ptep))) {
++ new_spte &= ~PT_WRITABLE_MASK;
++ new_spte &= ~SPTE_HOST_WRITEABLE;
++ if (is_writeble_pte(*spte))
++ kvm_set_pfn_dirty(spte_to_pfn(*spte));
++ }
++ set_shadow_pte(spte, new_spte);
++ spte = rmap_next(kvm, rmapp, spte);
++ }
++ }
++ if (need_flush)
++ kvm_flush_remote_tlbs(kvm);
++
++ return 0;
++}
++
+ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+- int (*handler)(struct kvm *kvm, unsigned long *rmapp))
++ unsigned long data,
++ int (*handler)(struct kvm *kvm, unsigned long *rmapp,
++ unsigned long data))
+ {
+ int i;
+ int retval = 0;
+@@ -721,11 +767,13 @@ static int kvm_handle_hva(struct kvm *kv
+ end = start + (memslot->npages << PAGE_SHIFT);
+ if (hva >= start && hva < end) {
+ gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+- retval |= handler(kvm, &memslot->rmap[gfn_offset]);
++ retval |= handler(kvm, &memslot->rmap[gfn_offset],
++ data);
+ retval |= handler(kvm,
+ &memslot->lpage_info[
+ gfn_offset /
+- KVM_PAGES_PER_HPAGE].rmap_pde);
++ KVM_PAGES_PER_HPAGE].rmap_pde,
++ data);
+ }
+ }
+
+@@ -734,10 +782,16 @@ static int kvm_handle_hva(struct kvm *kv
+
+ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+ {
+- return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
++ return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
++}
++
++void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
++{
++ kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+ }
+
+-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
++static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
++ unsigned long data)
+ {
+ u64 *spte;
+ int young = 0;
+@@ -770,13 +824,13 @@ static void rmap_recycle(struct kvm_vcpu
+ gfn = unalias_gfn(vcpu->kvm, gfn);
+ rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+
+- kvm_unmap_rmapp(vcpu->kvm, rmapp);
++ kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ }
+
+ int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+ {
+- return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
++ return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
+ }
+
+ #ifdef MMU_DEBUG
+@@ -1686,7 +1740,7 @@ static int set_spte(struct kvm_vcpu *vcp
+ unsigned pte_access, int user_fault,
+ int write_fault, int dirty, int largepage,
+ gfn_t gfn, pfn_t pfn, bool speculative,
+- bool can_unsync)
++ bool can_unsync, bool reset_host_protection)
+ {
+ u64 spte;
+ int ret = 0;
+@@ -1744,6 +1798,8 @@ static int set_spte(struct kvm_vcpu *vcp
+ spte &= ~PT_WRITABLE_MASK;
+ }
+ }
++ if (reset_host_protection)
++ spte |= SPTE_HOST_WRITEABLE;
+
+ if (pte_access & ACC_WRITE_MASK)
+ mark_page_dirty(vcpu->kvm, gfn);
+@@ -1757,7 +1813,8 @@ static void mmu_set_spte(struct kvm_vcpu
+ unsigned pt_access, unsigned pte_access,
+ int user_fault, int write_fault, int dirty,
+ int *ptwrite, int largepage, gfn_t gfn,
+- pfn_t pfn, bool speculative)
++ pfn_t pfn, bool speculative,
++ bool reset_host_protection)
+ {
+ int was_rmapped = 0;
+ int was_writeble = is_writeble_pte(*shadow_pte);
+@@ -1787,7 +1844,8 @@ static void mmu_set_spte(struct kvm_vcpu
+ was_rmapped = 1;
+ }
+ if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
+- dirty, largepage, gfn, pfn, speculative, true)) {
++ dirty, largepage, gfn, pfn, speculative, true,
++ reset_host_protection)) {
+ if (write_fault)
+ *ptwrite = 1;
+ kvm_x86_ops->tlb_flush(vcpu);
+@@ -1804,8 +1862,7 @@ static void mmu_set_spte(struct kvm_vcpu
+ page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+ if (!was_rmapped) {
+ rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
+- if (!is_rmap_pte(*shadow_pte))
+- kvm_release_pfn_clean(pfn);
++ kvm_release_pfn_clean(pfn);
+ if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+ rmap_recycle(vcpu, gfn, largepage);
+ } else {
+@@ -1837,7 +1894,7 @@ static int __direct_map(struct kvm_vcpu
+ || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
+ mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+ 0, write, 1, &pt_write,
+- largepage, gfn, pfn, false);
++ largepage, gfn, pfn, false, true);
+ ++vcpu->stat.pf_fixed;
+ break;
+ }
+--- linux-2.6.30.x86_64/arch/x86/kvm/paging_tmpl.h 2009-08-20 10:37:37.966889166 -0500
++++ linux-2.6.30.x86_64.kvm/arch/x86/kvm/paging_tmpl.h 2009-08-20 10:39:33.747636180 -0500
+@@ -266,9 +266,13 @@ static void FNAME(update_pte)(struct kvm
+ if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
+ return;
+ kvm_get_pfn(pfn);
++ /*
++ * we call mmu_set_spte() with reset_host_protection = true because
++ * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
++ */
+ mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
+ gpte & PT_DIRTY_MASK, NULL, largepage,
+- gpte_to_gfn(gpte), pfn, true);
++ gpte_to_gfn(gpte), pfn, true, true);
+ }
+
+ /*
+@@ -302,7 +306,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
+ user_fault, write_fault,
+ gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+ ptwrite, largepage,
+- gw->gfn, pfn, false);
++ gw->gfn, pfn, false, true);
+ break;
+ }
+
+@@ -552,6 +556,7 @@ static void FNAME(prefetch_page)(struct
+ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+ {
+ int i, offset, nr_present;
++ bool reset_host_protection = 1;
+
+ offset = nr_present = 0;
+
+@@ -589,9 +594,13 @@ static int FNAME(sync_page)(struct kvm_v
+
+ nr_present++;
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
++ if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
++ pte_access &= ~PT_WRITABLE_MASK;
++ reset_host_protection = 0;
++ } else { reset_host_protection = 1; }
+ set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
+ is_dirty_pte(gpte), 0, gfn,
+- spte_to_pfn(sp->spt[i]), true, false);
++ spte_to_pfn(sp->spt[i]), true, false, reset_host_protection);
+ }
+
+ return !nr_present;
+--- linux-2.6.30.x86_64/virt/kvm/kvm_main.c 2009-08-20 10:37:45.448886340 -0500
++++ linux-2.6.30.x86_64.kvm/virt/kvm/kvm_main.c 2009-08-20 10:39:33.749636212 -0500
+@@ -859,6 +859,19 @@ static void kvm_mmu_notifier_invalidate_
+
+ }
+
++static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
++ struct mm_struct *mm,
++ unsigned long address,
++ pte_t pte)
++{
++ struct kvm *kvm = mmu_notifier_to_kvm(mn);
++
++ spin_lock(&kvm->mmu_lock);
++ kvm->mmu_notifier_seq++;
++ kvm_set_spte_hva(kvm, address, pte);
++ spin_unlock(&kvm->mmu_lock);
++}
++
+ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+@@ -938,6 +951,7 @@ static const struct mmu_notifier_ops kvm
+ .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+ .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
+ .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
++ .change_pte = kvm_mmu_notifier_change_pte,
+ .release = kvm_mmu_notifier_release,
+ };
+ #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
diff --git a/freed-ora/current/F-12/linux-2.6-makefile-after_link.patch b/freed-ora/current/F-12/linux-2.6-makefile-after_link.patch
new file mode 100644
index 000000000..94b71f9b1
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-makefile-after_link.patch
@@ -0,0 +1,57 @@
+diff --git a/Makefile b/Makefile
+index f908acc..960ff6f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -746,6 +746,10 @@ quiet_cmd_vmlinux__ ?= LD $@
+ --start-group $(vmlinux-main) --end-group \
+ $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^)
+
++ifdef AFTER_LINK
++cmd_vmlinux__ += ; $(AFTER_LINK)
++endif
++
+ # Generate new vmlinux version
+ quiet_cmd_vmlinux_version = GEN .version
+ cmd_vmlinux_version = set -e; \
+diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
+index 51ead52..ad21273 100644
+--- a/arch/powerpc/kernel/vdso32/Makefile
++++ b/arch/powerpc/kernel/vdso32/Makefile
+@@ -41,7 +41,8 @@ $(obj-vdso32): %.o: %.S
+
+ # actual build commands
+ quiet_cmd_vdso32ld = VDSO32L $@
+- cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
++ cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ \
++ $(if $(AFTER_LINK),; $(AFTER_LINK))
+ quiet_cmd_vdso32as = VDSO32A $@
+ cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
+
+diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
+index 79da65d..f11c21b 100644
+--- a/arch/powerpc/kernel/vdso64/Makefile
++++ b/arch/powerpc/kernel/vdso64/Makefile
+@@ -36,7 +36,8 @@ $(obj-vdso64): %.o: %.S
+
+ # actual build commands
+ quiet_cmd_vdso64ld = VDSO64L $@
+- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
++ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ \
++ $(if $(AFTER_LINK),; $(AFTER_LINK))
+ quiet_cmd_vdso64as = VDSO64A $@
+ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+
+diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
+index 6b4ffed..cbc3d05 100644
+--- a/arch/x86/vdso/Makefile
++++ b/arch/x86/vdso/Makefile
+@@ -120,7 +120,8 @@ $(obj)/vdso32-syms.lds: $(vdso32.so-y:%=$(obj)/vdso32-%-syms.lds) FORCE
+ quiet_cmd_vdso = VDSO $@
+ cmd_vdso = $(CC) -nostdlib -o $@ \
+ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
++ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) \
++ $(if $(AFTER_LINK),; $(AFTER_LINK))
+
+ VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ GCOV_PROFILE := n
diff --git a/freed-ora/current/F-12/linux-2.6-nfs4-callback-hidden.patch b/freed-ora/current/F-12/linux-2.6-nfs4-callback-hidden.patch
new file mode 100644
index 000000000..8fc236836
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-nfs4-callback-hidden.patch
@@ -0,0 +1,20 @@
+Author: Steve Dickson <steved@redhat.com>
+Date: Tue Oct 13 15:59:57 EDT 2009
+
+To avoid hangs in svc_unregister() on version 4 mounts (and
+unmounts) when rpcbind is not running, make the nfs4 callback
+program a 'hidden' service by setting the 'vs_hidden' flag in the
+nfs4_callback_version structure.
+
+Signed-off-by: Steve Dickson <steved@redhat.com>
+
+diff -up linux-2.6.31.x86_64/fs/nfs/callback_xdr.c.orig linux-2.6.31.x86_64/fs/nfs/callback_xdr.c
+--- linux-2.6.31.x86_64/fs/nfs/callback_xdr.c.orig 2009-09-09 18:13:59.000000000 -0400
++++ linux-2.6.31.x86_64/fs/nfs/callback_xdr.c 2009-10-13 15:40:19.000000000 -0400
+@@ -716,5 +716,6 @@ struct svc_version nfs4_callback_version
+ .vs_proc = nfs4_callback_procedures1,
+ .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
+ .vs_dispatch = NULL,
++ .vs_hidden = 1,
+ };
+
diff --git a/freed-ora/current/F-12/linux-2.6-nfsd4-proots.patch b/freed-ora/current/F-12/linux-2.6-nfsd4-proots.patch
new file mode 100644
index 000000000..84d589470
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-nfsd4-proots.patch
@@ -0,0 +1,226 @@
+diff -up linux-2.6.32.i686/fs/nfsd/export.c.save linux-2.6.32.i686/fs/nfsd/export.c
+--- linux-2.6.32.i686/fs/nfsd/export.c.save 2009-12-04 10:24:17.000000000 -0500
++++ linux-2.6.32.i686/fs/nfsd/export.c 2009-12-04 10:40:52.000000000 -0500
+@@ -372,10 +372,12 @@ static struct svc_export *svc_export_loo
+ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
+ {
+
+- /* We currently export only dirs and regular files.
+- * This is what umountd does.
++ /*
++ * We currently export only dirs, regular files, and (for v4
++ * pseudoroot) symlinks.
+ */
+ if (!S_ISDIR(inode->i_mode) &&
++ !S_ISLNK(inode->i_mode) &&
+ !S_ISREG(inode->i_mode))
+ return -ENOTDIR;
+
+@@ -1425,6 +1427,7 @@ static struct flags {
+ { NFSEXP_CROSSMOUNT, {"crossmnt", ""}},
+ { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
+ { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
++ { NFSEXP_V4ROOT, {"v4root", ""}},
+ #ifdef MSNFS
+ { NFSEXP_MSNFS, {"msnfs", ""}},
+ #endif
+@@ -1505,7 +1508,7 @@ static int e_show(struct seq_file *m, vo
+ struct svc_export *exp = container_of(cp, struct svc_export, h);
+
+ if (p == SEQ_START_TOKEN) {
+- seq_puts(m, "# Version 1.1\n");
++ seq_puts(m, "# Version 1.2\n");
+ seq_puts(m, "# Path Client(Flags) # IPs\n");
+ return 0;
+ }
+diff -up linux-2.6.32.i686/fs/nfsd/nfs4xdr.c.save linux-2.6.32.i686/fs/nfsd/nfs4xdr.c
+--- linux-2.6.32.i686/fs/nfsd/nfs4xdr.c.save 2009-12-04 10:24:17.000000000 -0500
++++ linux-2.6.32.i686/fs/nfsd/nfs4xdr.c 2009-12-04 10:26:49.000000000 -0500
+@@ -2204,11 +2204,14 @@ nfsd4_encode_dirent_fattr(struct nfsd4_r
+ * we will not follow the cross mount and will fill the attribtutes
+ * directly from the mountpoint dentry.
+ */
+- if (d_mountpoint(dentry) && !attributes_need_mount(cd->rd_bmval))
+- ignore_crossmnt = 1;
+- else if (d_mountpoint(dentry)) {
++ if (nfsd_mountpoint(dentry, exp)) {
+ int err;
+
++ if (!(exp->ex_flags & NFSEXP_V4ROOT)
++ && !attributes_need_mount(cd->rd_bmval)) {
++ ignore_crossmnt = 1;
++ goto out_encode;
++ }
+ /*
+ * Why the heck aren't we just using nfsd_lookup??
+ * Different "."/".." handling? Something else?
+@@ -2224,6 +2227,7 @@ nfsd4_encode_dirent_fattr(struct nfsd4_r
+ goto out_put;
+
+ }
++out_encode:
+ nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
+ cd->rd_rqstp, ignore_crossmnt);
+ out_put:
+diff -up linux-2.6.32.i686/fs/nfsd/nfsfh.c.save linux-2.6.32.i686/fs/nfsd/nfsfh.c
+--- linux-2.6.32.i686/fs/nfsd/nfsfh.c.save 2009-12-04 10:24:17.000000000 -0500
++++ linux-2.6.32.i686/fs/nfsd/nfsfh.c 2009-12-04 10:38:26.000000000 -0500
+@@ -109,6 +109,36 @@ static __be32 nfsd_setuser_and_check_por
+ return nfserrno(nfsd_setuser(rqstp, exp));
+ }
+
++static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
++ struct dentry *dentry, struct svc_export *exp)
++{
++ if (!(exp->ex_flags & NFSEXP_V4ROOT))
++ return nfs_ok;
++ /*
++ * v2/v3 clients have no need for the V4ROOT export--they use
++ * the mount protocol instead; also, further V4ROOT checks may be
++ * in v4-specific code, in which case v2/v3 clients could bypass
++ * them.
++ */
++ if (!nfsd_v4client(rqstp))
++ return nfserr_stale;
++ /*
++ * We're exposing only the directories and symlinks that have to be
++ * traversed on the way to real exports:
++ */
++ if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
++ !S_ISLNK(dentry->d_inode->i_mode)))
++ return nfserr_stale;
++ /*
++ * A pseudoroot export gives permission to access only one
++ * single directory; the kernel has to make another upcall
++ * before granting access to anything else under it:
++ */
++ if (unlikely(dentry != exp->ex_path.dentry))
++ return nfserr_stale;
++ return nfs_ok;
++}
++
+ /*
+ * Use the given filehandle to look up the corresponding export and
+ * dentry. On success, the results are used to set fh_export and
+@@ -317,6 +347,13 @@ fh_verify(struct svc_rqst *rqstp, struct
+ goto out;
+ }
+
++ /*
++ * Do some spoof checking if we are on the pseudo root
++ */
++ error = check_pseudo_root(rqstp, dentry, exp);
++ if (error)
++ goto out;
++
+ error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
+ if (error)
+ goto out;
+diff -up linux-2.6.32.i686/fs/nfsd/vfs.c.save linux-2.6.32.i686/fs/nfsd/vfs.c
+--- linux-2.6.32.i686/fs/nfsd/vfs.c.save 2009-12-04 10:24:18.000000000 -0500
++++ linux-2.6.32.i686/fs/nfsd/vfs.c 2009-12-04 10:35:04.000000000 -0500
+@@ -89,12 +89,6 @@ struct raparm_hbucket {
+ #define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
+ static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
+
+-static inline int
+-nfsd_v4client(struct svc_rqst *rq)
+-{
+- return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
+-}
+-
+ /*
+ * Called from nfsd_lookup and encode_dirent. Check if we have crossed
+ * a mount point.
+@@ -116,8 +110,16 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, s
+
+ exp2 = rqst_exp_get_by_name(rqstp, &path);
+ if (IS_ERR(exp2)) {
+- if (PTR_ERR(exp2) != -ENOENT)
+- err = PTR_ERR(exp2);
++ err = PTR_ERR(exp2);
++ /*
++ * We normally allow NFS clients to continue
++ * "underneath" a mountpoint that is not exported.
++ * The exception is V4ROOT, where no traversal is ever
++ * allowed without an explicit export of the new
++ * directory.
++ */
++ if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
++ err = 0;
+ path_put(&path);
+ goto out;
+ }
+@@ -141,6 +143,19 @@ out:
+ return err;
+ }
+
++/*
++ * For nfsd purposes, we treat V4ROOT exports as though there was an
++ * export at *every* directory.
++ */
++int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
++{
++ if (d_mountpoint(dentry))
++ return 1;
++ if (!(exp->ex_flags & NFSEXP_V4ROOT))
++ return 0;
++ return dentry->d_inode != NULL;
++}
++
+ __be32
+ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ const char *name, unsigned int len,
+@@ -208,7 +223,7 @@ nfsd_lookup_dentry(struct svc_rqst *rqst
+ /*
+ * check if we have crossed a mount point ...
+ */
+- if (d_mountpoint(dentry)) {
++ if (nfsd_mountpoint(dentry, exp)) {
+ if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) {
+ dput(dentry);
+ goto out_nfserr;
+diff -up linux-2.6.32.i686/include/linux/nfsd/export.h.save linux-2.6.32.i686/include/linux/nfsd/export.h
+--- linux-2.6.32.i686/include/linux/nfsd/export.h.save 2009-12-04 10:24:18.000000000 -0500
++++ linux-2.6.32.i686/include/linux/nfsd/export.h 2009-12-04 10:25:08.000000000 -0500
+@@ -39,7 +39,17 @@
+ #define NFSEXP_FSID 0x2000
+ #define NFSEXP_CROSSMOUNT 0x4000
+ #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */
+-#define NFSEXP_ALLFLAGS 0xFE3F
++/*
++ * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4
++ * clients, and only to the single directory that is the root of the
++ * export; further lookup and readdir operations are treated as if every
++ * subdirectory was a mountpoint, and ignored if they are not themselves
++ * exported. This is used by nfsd and mountd to construct the NFSv4
++ * pseudofilesystem, which provides access only to paths leading to each
++ * exported filesystem.
++ */
++#define NFSEXP_V4ROOT 0x10000
++#define NFSEXP_ALLFLAGS 0x1FE3F
+
+ /* The flags that may vary depending on security flavor: */
+ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
+diff -up linux-2.6.32.i686/include/linux/nfsd/nfsd.h.save linux-2.6.32.i686/include/linux/nfsd/nfsd.h
+--- linux-2.6.32.i686/include/linux/nfsd/nfsd.h.save 2009-12-04 10:24:18.000000000 -0500
++++ linux-2.6.32.i686/include/linux/nfsd/nfsd.h 2009-12-04 10:39:18.000000000 -0500
+@@ -86,6 +86,7 @@ __be32 nfsd_lookup_dentry(struct svc_r
+ struct svc_export **, struct dentry **);
+ __be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
+ struct iattr *, int, time_t);
++int nfsd_mountpoint(struct dentry *, struct svc_export *);
+ #ifdef CONFIG_NFSD_V4
+ __be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
+ struct nfs4_acl *);
+@@ -394,6 +395,10 @@ static inline u32 nfsd_suppattrs2(u32 mi
+ return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2
+ : NFSD4_SUPPORTED_ATTRS_WORD2;
+ }
++static inline int nfsd_v4client(struct svc_rqst *rq)
++{
++ return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
++}
+
+ /* These will return ERR_INVAL if specified in GETATTR or READDIR. */
+ #define NFSD_WRITEONLY_ATTRS_WORD1 \
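For reference, the new export flag sits just above the previous top bit, which is why NFSEXP_ALLFLAGS grows from 0xFE3F to 0x1FE3F in the hunk above. The standalone C sketch below is illustrative only (not part of the patch set); it simply verifies that arithmetic and shows the kind of flag test nfsd performs against ex_flags. The ex_flags value is a hypothetical pseudoroot export.

    #include <stdio.h>

    #define NFSEXP_V4ROOT   0x10000     /* new flag introduced by the patch above */

    int main(void)
    {
        unsigned int old_allflags = 0xFE3F;
        unsigned int new_allflags = old_allflags | NFSEXP_V4ROOT;  /* 0x1FE3F */
        unsigned int ex_flags = NFSEXP_V4ROOT;  /* hypothetical pseudoroot export */

        printf("ALLFLAGS: 0x%X -> 0x%X\n", old_allflags, new_allflags);
        printf("pseudoroot export: %s\n",
               (ex_flags & NFSEXP_V4ROOT) ? "yes" : "no");
        return 0;
    }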
diff --git a/freed-ora/current/F-12/linux-2.6-pci-cacheline-sizing.patch b/freed-ora/current/F-12/linux-2.6-pci-cacheline-sizing.patch
new file mode 100644
index 000000000..8acaee494
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-pci-cacheline-sizing.patch
@@ -0,0 +1,41 @@
+PCI: Use generic cacheline sizing instead of per-vendor tests.
+
+Instead of the pci code having to determine the cacheline size of
+each processor itself, use the data the cpu identification code
+should have already determined during early boot.
+
+I chose not to delete the existing code for the time being.
+Instead I added some additional debug statements to be sure that it's
+doing the right thing, and to compare it against what the old code
+would have done. After this has been proven right in a release,
+we can delete the paranoid checks and all the old vendor checking code.
+
+Signed-off-by: Dave Jones <davej@redhat.com>
+
+diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
+index 2202b62..f371fe8 100644
+--- a/arch/x86/pci/common.c
++++ b/arch/x86/pci/common.c
+@@ -432,6 +432,22 @@ int __init pcibios_init(void)
+ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
+ pci_cache_line_size = 128 >> 2; /* P4 */
+
++ if (c->x86_clflush_size != (pci_cache_line_size <<2))
++ printk(KERN_DEBUG "PCI: old code would have set cacheline "
++ "size to %d bytes, but clflush_size = %d\n",
++ pci_cache_line_size << 2,
++ c->x86_clflush_size);
++
++ /* Once we know this logic works, all the above code can be deleted. */
++ if (c->x86_clflush_size > 0) {
++ pci_cache_line_size = c->x86_clflush_size >> 2;
++ printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
++ pci_cache_line_size << 2);
++ } else {
++ pci_cache_line_size = 32 >> 2;
++ printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
++ }
++
+ pcibios_resource_survey();
+
+ if (pci_bf_sort >= pci_force_bf)
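Note that pci_cache_line_size is kept in units of 32-bit dwords, which is why the hunk above shifts c->x86_clflush_size (a byte count) right by two before assigning it. A minimal userspace sketch of that conversion, illustrative only (the 64-byte value is just an assumed typical cacheline):

    #include <stdio.h>

    int main(void)
    {
        unsigned int clflush_bytes = 64;                        /* assumed typical x86 cacheline */
        unsigned int pci_cache_line_size = clflush_bytes >> 2;  /* stored in dwords */

        printf("cacheline %u bytes -> pci_cache_line_size %u dwords\n",
               clflush_bytes, pci_cache_line_size);
        return 0;
    }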
diff --git a/freed-ora/current/F-12/linux-2.6-pciehp-update.patch b/freed-ora/current/F-12/linux-2.6-pciehp-update.patch
new file mode 100644
index 000000000..38ec79724
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-pciehp-update.patch
@@ -0,0 +1,147 @@
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index b2801a7..c9f18f9 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -224,6 +224,10 @@ static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
+ {
+ u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
++ if (pciehp_force) {
++ dev_info(&dev->dev, "Bypassing BIOS check for pciehp\n");
++ return 0;
++ }
+ return acpi_get_hp_hw_control_from_firmware(dev, flags);
+ }
+
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 39cf248..ab6b016 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -41,6 +41,7 @@ int pciehp_debug;
+ int pciehp_poll_mode;
+ int pciehp_poll_time;
+ int pciehp_force;
++int pciehp_passive;
+ struct workqueue_struct *pciehp_wq;
+
+ #define DRIVER_VERSION "0.4"
+@@ -50,15 +51,18 @@ struct workqueue_struct *pciehp_wq;
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("acpi*:PNP0A08:*");
+
+ module_param(pciehp_debug, bool, 0644);
+ module_param(pciehp_poll_mode, bool, 0644);
+ module_param(pciehp_poll_time, int, 0644);
+ module_param(pciehp_force, bool, 0644);
++module_param(pciehp_passive, bool, 0644);
+ MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
+ MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
+ MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
+ MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
++MODULE_PARM_DESC(pciehp_passive, "Listen for pciehp events, even if _OSC and OSHP are missing");
+
+ #define PCIE_MODULE_NAME "pciehp"
+
+@@ -85,6 +89,13 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
+ .get_cur_bus_speed = get_cur_bus_speed,
+ };
+
++static struct hotplug_slot_ops pciehp_passive_hotplug_slot_ops = {
++ .owner = THIS_MODULE,
++ .get_adapter_status = get_adapter_status,
++ .get_max_bus_speed = get_max_bus_speed,
++ .get_cur_bus_speed = get_cur_bus_speed,
++};
++
+ /*
+ * Check the status of the Electro Mechanical Interlock (EMI)
+ */
+@@ -212,7 +223,11 @@ static int init_slots(struct controller *ctrl)
+ hotplug_slot->info = info;
+ hotplug_slot->private = slot;
+ hotplug_slot->release = &release_slot;
+- hotplug_slot->ops = &pciehp_hotplug_slot_ops;
++ if (pciehp_passive &&
++ pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev))
++ hotplug_slot->ops = &pciehp_passive_hotplug_slot_ops;
++ else
++ hotplug_slot->ops = &pciehp_hotplug_slot_ops;
+ slot->hotplug_slot = hotplug_slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+
+@@ -407,11 +422,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
+ u8 value;
+ struct pci_dev *pdev = dev->port;
+
+- if (pciehp_force)
+- dev_info(&dev->device,
+- "Bypassing BIOS check for pciehp use on %s\n",
+- pci_name(pdev));
+- else if (pciehp_get_hp_hw_control_from_firmware(pdev))
++ if (!pciehp_passive && pciehp_get_hp_hw_control_from_firmware(pdev))
+ goto err_out_none;
+
+ ctrl = pcie_init(dev);
+@@ -436,7 +447,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
+ t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
+ t_slot->hpc_ops->get_adapter_status(t_slot, &value);
+ if (value) {
+- if (pciehp_force)
++ if (pciehp_force || pciehp_passive)
+ pciehp_enable_slot(t_slot);
+ } else {
+ /* Power off slot if not occupied */
+@@ -474,8 +485,11 @@ static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
+
+ static int pciehp_resume (struct pcie_device *dev)
+ {
++ struct pci_dev *pdev = dev->port;
+ dev_info(&dev->device, "%s ENTRY\n", __func__);
+- if (pciehp_force) {
++
++ if (pciehp_force || (pciehp_passive &&
++ pciehp_get_hp_hw_control_from_firmware(pdev))) {
+ struct controller *ctrl = get_service_data(dev);
+ struct slot *t_slot;
+ u8 status;
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index fead63c..12640bd 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -185,7 +185,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
+ * before taking any action that relies on power having been
+ * removed from the slot/adapter.
+ */
+- msleep(1000);
++ if (PWR_LED(ctrl) || ATTN_LED(ctrl))
++ msleep(1000);
+
+ if (PWR_LED(ctrl))
+ pslot->hpc_ops->green_led_off(pslot);
+@@ -288,16 +289,16 @@ static int remove_board(struct slot *p_slot)
+ }
+ }
+
+- /*
+- * After turning power off, we must wait for at least 1 second
+- * before taking any action that relies on power having been
+- * removed from the slot/adapter.
+- */
+- msleep(1000);
+-
+- if (PWR_LED(ctrl))
++ if (PWR_LED(ctrl)) {
++ /*
++ * After turning power off, we must wait for at least 1 second
++ * before taking any action that relies on power having been
++ * removed from the slot/adapter.
++ */
++ msleep(1000);
+ /* turn off Green LED */
+ p_slot->hpc_ops->green_led_off(p_slot);
++ }
+
+ return 0;
+ }
diff --git a/freed-ora/current/F-12/linux-2.6-phylib-autoload.patch b/freed-ora/current/F-12/linux-2.6-phylib-autoload.patch
new file mode 100644
index 000000000..2c423a90a
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-phylib-autoload.patch
@@ -0,0 +1,406 @@
+From c413dfa59bf979475a9647cc165f547021efeb27 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw2@infradead.org>
+Date: Wed, 31 Mar 2010 02:10:20 +0100
+Subject: [PATCH 1/2] phylib: Support phy module autoloading
+
+We don't use the normal hotplug mechanism because it doesn't work. It will
+load the module some time after the device appears, but that's not good
+enough for us -- we need the driver loaded _immediately_ because otherwise
+the NIC driver may just abort and then the phy 'device' goes away.
+
+[bwh: s/phy/mdio/ in module alias, kerneldoc for struct mdio_device_id]
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+---
+ drivers/net/phy/phy_device.c | 12 ++++++++++++
+ include/linux/mod_devicetable.h | 26 ++++++++++++++++++++++++++
+ include/linux/phy.h | 1 +
+ scripts/mod/file2alias.c | 26 ++++++++++++++++++++++++++
+ 4 files changed, 65 insertions(+), 0 deletions(-)
+
+From 9ddd9886cc89827a4713e9a96614148272fdaa8e Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw2@infradead.org>
+Date: Wed, 31 Mar 2010 02:12:06 +0100
+Subject: [PATCH 2/2] phylib: Add module table to all existing phy drivers
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+---
+ drivers/net/phy/bcm63xx.c | 8 ++++++++
+ drivers/net/phy/broadcom.c | 16 ++++++++++++++++
+ drivers/net/phy/cicada.c | 8 ++++++++
+ drivers/net/phy/davicom.c | 9 +++++++++
+ drivers/net/phy/et1011c.c | 7 +++++++
+ drivers/net/phy/icplus.c | 7 +++++++
+ drivers/net/phy/lxt.c | 8 ++++++++
+ drivers/net/phy/marvell.c | 13 +++++++++++++
+ drivers/net/phy/national.c | 7 +++++++
+ drivers/net/phy/qsemi.c | 7 +++++++
+ drivers/net/phy/realtek.c | 7 +++++++
+ drivers/net/phy/smsc.c | 11 +++++++++++
+ drivers/net/phy/ste10Xp.c | 8 ++++++++
+ drivers/net/phy/vitesse.c | 8 ++++++++
+ 14 files changed, 124 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index b10fedd..0db6781 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
+ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+ {
+ struct phy_device *dev;
++
+ /* We allocate the device, and initialize the
+ * default values */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+@@ -178,6 +179,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+
+ mutex_init(&dev->lock);
+
++ /* Request the appropriate module unconditionally; don't
++ bother trying to do so only if it isn't already loaded,
++ because that gets complicated. A hotplug event would have
++ done an unconditional modprobe anyway.
++ We don't do normal hotplug because it won't work for MDIO
++ -- because it relies on the device staying around for long
++ enough for the driver to get loaded. With MDIO, the NIC
++ driver will get bored and give up as soon as it finds that
++ there's no driver _already_ loaded. */
++ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
++
+ return dev;
+ }
+ EXPORT_SYMBOL(phy_device_create);
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index f58e9d8..55f1f9c 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -474,4 +474,30 @@ struct platform_device_id {
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+ };
+
++#define MDIO_MODULE_PREFIX "mdio:"
++
++#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
++#define MDIO_ID_ARGS(_id) \
++ (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
++ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
++ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
++ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
++ ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \
++ ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \
++ ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \
++ ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1
++
++/**
++ * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
++ * @phy_id: The result of
++ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
++ * for this PHY type
++ * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
++ * is used to terminate an array of struct mdio_device_id.
++ */
++struct mdio_device_id {
++ __u32 phy_id;
++ __u32 phy_id_mask;
++};
++
+ #endif /* LINUX_MOD_DEVICETABLE_H */
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index b1368b8..b85de0d 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -24,6 +24,7 @@
+ #include <linux/mii.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
++#include <linux/mod_devicetable.h>
+
+ #include <asm/atomic.h>
+
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 62a9025..05f0c06 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -727,6 +727,28 @@ static int do_platform_entry(const char *filename,
+ return 1;
+ }
+
++static int do_mdio_entry(const char *filename,
++ struct mdio_device_id *id, char *alias)
++{
++ int i;
++
++ alias += sprintf(alias, MDIO_MODULE_PREFIX);
++
++ for (i = 0; i < 32; i++) {
++ if (!((id->phy_id_mask >> (31-i)) & 1))
++ *(alias++) = '?';
++ else if ((id->phy_id >> (31-i)) & 1)
++ *(alias++) = '1';
++ else
++ *(alias++) = '0';
++ }
++
++ /* Terminate the string */
++ *alias = 0;
++
++ return 1;
++}
++
+ /* Ignore any prefix, eg. some architectures prepend _ */
+ static inline int sym_is(const char *symbol, const char *name)
+ {
+@@ -874,6 +896,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ do_table(symval, sym->st_size,
+ sizeof(struct platform_device_id), "platform",
+ do_platform_entry, mod);
++ else if (sym_is(symname, "__mod_mdio_device_table"))
++ do_table(symval, sym->st_size,
++ sizeof(struct mdio_device_id), "mdio",
++ do_mdio_entry, mod);
+ free(zeros);
+ }
+
+diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
+index 4fed95e..ac5e498 100644
+--- a/drivers/net/phy/bcm63xx.c
++++ b/drivers/net/phy/bcm63xx.c
+@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void)
+
+ module_init(bcm63xx_phy_init);
+ module_exit(bcm63xx_phy_exit);
++
++static struct mdio_device_id bcm63xx_tbl[] = {
++ { 0x00406000, 0xfffffc00 },
++ { 0x002bdc00, 0xfffffc00 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index f81e532..f46815d 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -830,3 +830,19 @@ static void __exit broadcom_exit(void)
+
+ module_init(broadcom_init);
+ module_exit(broadcom_exit);
++
++static struct mdio_device_id broadcom_tbl[] = {
++ { 0x00206070, 0xfffffff0 },
++ { 0x002060e0, 0xfffffff0 },
++ { 0x002060c0, 0xfffffff0 },
++ { 0x002060b0, 0xfffffff0 },
++ { 0x0143bca0, 0xfffffff0 },
++ { 0x0143bcb0, 0xfffffff0 },
++ { PHY_ID_BCM50610, 0xfffffff0 },
++ { PHY_ID_BCM50610M, 0xfffffff0 },
++ { PHY_ID_BCM57780, 0xfffffff0 },
++ { 0x0143bc70, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
+diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
+index a1bd599..efc608f 100644
+--- a/drivers/net/phy/cicada.c
++++ b/drivers/net/phy/cicada.c
+@@ -159,3 +159,11 @@ static void __exit cicada_exit(void)
+
+ module_init(cicada_init);
+ module_exit(cicada_exit);
++
++static struct mdio_device_id cicada_tbl[] = {
++ { 0x000fc410, 0x000ffff0 },
++ { 0x000fc440, 0x000fffc0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, cicada_tbl);
+diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
+index d926168..e02b18c 100644
+--- a/drivers/net/phy/davicom.c
++++ b/drivers/net/phy/davicom.c
+@@ -219,3 +219,12 @@ static void __exit davicom_exit(void)
+
+ module_init(davicom_init);
+ module_exit(davicom_exit);
++
++static struct mdio_device_id davicom_tbl[] = {
++ { 0x0181b880, 0x0ffffff0 },
++ { 0x0181b8a0, 0x0ffffff0 },
++ { 0x00181b80, 0x0ffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, davicom_tbl);
+diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
+index b031fa2..500f0fd 100644
+--- a/drivers/net/phy/et1011c.c
++++ b/drivers/net/phy/et1011c.c
+@@ -111,3 +111,10 @@ static void __exit et1011c_exit(void)
+
+ module_init(et1011c_init);
+ module_exit(et1011c_exit);
++
++static struct mdio_device_id et1011c_tbl[] = {
++ { 0x0282f014, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
+diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
+index af3f1f2..e661e90 100644
+--- a/drivers/net/phy/icplus.c
++++ b/drivers/net/phy/icplus.c
+@@ -132,3 +132,10 @@ static void __exit ip175c_exit(void)
+
+ module_init(ip175c_init);
+ module_exit(ip175c_exit);
++
++static struct mdio_device_id icplus_tbl[] = {
++ { 0x02430d80, 0x0ffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, icplus_tbl);
+diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
+index 4cf3324..1d94f1d 100644
+--- a/drivers/net/phy/lxt.c
++++ b/drivers/net/phy/lxt.c
+@@ -174,3 +174,11 @@ static void __exit lxt_exit(void)
+
+ module_init(lxt_init);
+ module_exit(lxt_exit);
++
++static struct mdio_device_id lxt_tbl[] = {
++ { 0x78100000, 0xfffffff0 },
++ { 0x001378e0, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, lxt_tbl);
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 6f69b9b..4e58b2c 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -611,3 +611,16 @@ static void __exit marvell_exit(void)
+
+ module_init(marvell_init);
+ module_exit(marvell_exit);
++
++static struct mdio_device_id marvell_tbl[] = {
++ { 0x01410c60, 0xfffffff0 },
++ { 0x01410c90, 0xfffffff0 },
++ { 0x01410cc0, 0xfffffff0 },
++ { 0x01410e10, 0xfffffff0 },
++ { 0x01410cb0, 0xfffffff0 },
++ { 0x01410cd0, 0xfffffff0 },
++ { 0x01410e30, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, marvell_tbl);
+diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
+index 6c636eb..729ab29 100644
+--- a/drivers/net/phy/national.c
++++ b/drivers/net/phy/national.c
+@@ -153,3 +153,10 @@ MODULE_LICENSE("GPL");
+
+ module_init(ns_init);
+ module_exit(ns_exit);
++
++static struct mdio_device_id ns_tbl[] = {
++ { DP83865_PHY_ID, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, ns_tbl);
+diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
+index 23062d0..3ec9610 100644
+--- a/drivers/net/phy/qsemi.c
++++ b/drivers/net/phy/qsemi.c
+@@ -138,3 +138,10 @@ static void __exit qs6612_exit(void)
+
+ module_init(qs6612_init);
+ module_exit(qs6612_exit);
++
++static struct mdio_device_id qs6612_tbl[] = {
++ { 0x00181440, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index a052a67..f567c0e 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
+
+ module_init(realtek_init);
+ module_exit(realtek_exit);
++
++static struct mdio_device_id realtek_tbl[] = {
++ { 0x001cc912, 0x001fffff },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, realtek_tbl);
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index 5123bb9..9fb14b4 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -236,3 +236,14 @@ MODULE_LICENSE("GPL");
+
+ module_init(smsc_init);
+ module_exit(smsc_exit);
++
++static struct mdio_device_id smsc_tbl[] = {
++ { 0x0007c0a0, 0xfffffff0 },
++ { 0x0007c0b0, 0xfffffff0 },
++ { 0x0007c0c0, 0xfffffff0 },
++ { 0x0007c0d0, 0xfffffff0 },
++ { 0x0007c0f0, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, smsc_tbl);
+diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
+index 6bdb0d5..7229009 100644
+--- a/drivers/net/phy/ste10Xp.c
++++ b/drivers/net/phy/ste10Xp.c
+@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
+ module_init(ste10Xp_init);
+ module_exit(ste10Xp_exit);
+
++static struct mdio_device_id ste10Xp_tbl[] = {
++ { STE101P_PHY_ID, 0xfffffff0 },
++ { STE100P_PHY_ID, 0xffffffff },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
++
+ MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
+ MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
+index dd3b244..45cce50 100644
+--- a/drivers/net/phy/vitesse.c
++++ b/drivers/net/phy/vitesse.c
+@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
+
+ module_init(vsc82xx_init);
+ module_exit(vsc82xx_exit);
++
++static struct mdio_device_id vitesse_tbl[] = {
++ { PHY_ID_VSC8244, 0x000fffc0 },
++ { PHY_ID_VSC8221, 0x000ffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
+--
+1.6.6.1
+
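The request_module() call added above relies on the modalias pattern that do_mdio_entry() in scripts/mod/file2alias.c emits for each driver's mdio_device_id table. The sketch below is illustrative only, not part of the patch series; it reproduces the logic of do_mdio_entry() from the hunk above as a standalone userspace program so the generated "mdio:" wildcard string can be inspected.

    #include <stdio.h>

    /* Expand a phy_id/phy_id_mask pair into the "mdio:" modalias pattern. */
    static void mdio_alias(unsigned int phy_id, unsigned int phy_id_mask, char *alias)
    {
        int i;

        alias += sprintf(alias, "mdio:");
        for (i = 0; i < 32; i++) {
            if (!((phy_id_mask >> (31 - i)) & 1))
                *alias++ = '?';                 /* bit not significant */
            else
                *alias++ = ((phy_id >> (31 - i)) & 1) ? '1' : '0';
        }
        *alias = '\0';
    }

    int main(void)
    {
        char buf[64];

        mdio_alias(0x00406000, 0xfffffc00, buf);    /* first bcm63xx table entry */
        printf("%s\n", buf);
        return 0;
    }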
diff --git a/freed-ora/current/F-12/linux-2.6-ps3-storage-alias.patch b/freed-ora/current/F-12/linux-2.6-ps3-storage-alias.patch
new file mode 100644
index 000000000..ceb6519ab
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-ps3-storage-alias.patch
@@ -0,0 +1,7 @@
+--- linux-2.6.22.ppc64/drivers/block/ps3disk.c~ 2007-07-25 16:06:16.000000000 +0100
++++ linux-2.6.22.ppc64/drivers/block/ps3disk.c 2007-07-26 08:49:44.000000000 +0100
+@@ -628,3 +628,4 @@ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("PS3 Disk Storage Driver");
+ MODULE_AUTHOR("Sony Corporation");
+ MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);
++MODULE_ALIAS("ps3_storage");
diff --git a/freed-ora/current/F-12/linux-2.6-revert-dvb-net-kabi-change.patch b/freed-ora/current/F-12/linux-2.6-revert-dvb-net-kabi-change.patch
new file mode 100644
index 000000000..28d55e087
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-revert-dvb-net-kabi-change.patch
@@ -0,0 +1,149 @@
+diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
+index f6ba846..03fd9dd 100644
+--- a/drivers/media/dvb/dvb-core/dvb_net.c
++++ b/drivers/media/dvb/dvb-core/dvb_net.c
+@@ -125,6 +125,7 @@ static void hexdump( const unsigned char *buf, unsigned short len )
+
+ struct dvb_net_priv {
+ int in_use;
++ struct net_device_stats stats;
+ u16 pid;
+ struct net_device *net;
+ struct dvb_net *host;
+@@ -383,8 +384,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ if (priv->ule_skb) {
+ dev_kfree_skb( priv->ule_skb );
+ /* Prepare for next SNDU. */
+- dev->stats.rx_errors++;
+- dev->stats.rx_frame_errors++;
++ priv->stats.rx_errors++;
++ priv->stats.rx_frame_errors++;
+ }
+ reset_ule(priv);
+ priv->need_pusi = 1;
+@@ -437,8 +438,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ dev_kfree_skb( priv->ule_skb );
+ /* Prepare for next SNDU. */
+ // reset_ule(priv); moved to below.
+- dev->stats.rx_errors++;
+- dev->stats.rx_frame_errors++;
++ priv->stats.rx_errors++;
++ priv->stats.rx_frame_errors++;
+ }
+ reset_ule(priv);
+ /* skip to next PUSI. */
+@@ -459,8 +460,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ /* Drop partly decoded SNDU, reset state, resync on PUSI. */
+ if (priv->ule_skb) {
+ dev_kfree_skb( priv->ule_skb );
+- dev->stats.rx_errors++;
+- dev->stats.rx_frame_errors++;
++ priv->stats.rx_errors++;
++ priv->stats.rx_frame_errors++;
+ }
+ reset_ule(priv);
+ priv->need_pusi = 1;
+@@ -476,8 +477,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ if (priv->ule_sndu_remain > 183) {
+ /* Current SNDU lacks more data than there could be available in the
+ * current TS cell. */
+- dev->stats.rx_errors++;
+- dev->stats.rx_length_errors++;
++ priv->stats.rx_errors++;
++ priv->stats.rx_length_errors++;
+ printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but "
+ "got PUSI (pf %d, ts_remain %d). Flushing incomplete payload.\n",
+ priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain);
+@@ -519,8 +520,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ if (priv->ule_sndu_len < 5) {
+ printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. "
+ "Resyncing.\n", priv->ts_count, priv->ule_sndu_len);
+- dev->stats.rx_errors++;
+- dev->stats.rx_length_errors++;
++ priv->stats.rx_errors++;
++ priv->stats.rx_length_errors++;
+ priv->ule_sndu_len = 0;
+ priv->need_pusi = 1;
+ new_ts = 1;
+@@ -572,7 +573,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ if (priv->ule_skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+- dev->stats.rx_dropped++;
++ priv->stats.rx_dropped++;
+ return;
+ }
+
+@@ -636,8 +637,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ ule_dump = 1;
+ #endif
+
+- dev->stats.rx_errors++;
+- dev->stats.rx_crc_errors++;
++ priv->stats.rx_errors++;
++ priv->stats.rx_crc_errors++;
+ dev_kfree_skb(priv->ule_skb);
+ } else {
+ /* CRC32 verified OK. */
+@@ -743,8 +744,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
+ * receive the packet anyhow. */
+ /* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
+ priv->ule_skb->pkt_type = PACKET_HOST; */
+- dev->stats.rx_packets++;
+- dev->stats.rx_bytes += priv->ule_skb->len;
++ priv->stats.rx_packets++;
++ priv->stats.rx_bytes += priv->ule_skb->len;
+ netif_rx(priv->ule_skb);
+ }
+ sndu_done:
+@@ -799,7 +800,8 @@ static void dvb_net_sec(struct net_device *dev,
+ {
+ u8 *eth;
+ struct sk_buff *skb;
+- struct net_device_stats *stats = &dev->stats;
++ struct net_device_stats *stats =
++ &((struct dvb_net_priv *) netdev_priv(dev))->stats;
+ int snap = 0;
+
+ /* note: pkt_len includes a 32bit checksum */
+@@ -1214,29 +1216,28 @@ static int dvb_net_stop(struct net_device *dev)
+ return dvb_net_feed_stop(dev);
+ }
+
++static struct net_device_stats * dvb_net_get_stats(struct net_device *dev)
++{
++ return &((struct dvb_net_priv *) netdev_priv(dev))->stats;
++}
++
+ static const struct header_ops dvb_header_ops = {
+ .create = eth_header,
+ .parse = eth_header_parse,
+ .rebuild = eth_rebuild_header,
+ };
+
+-
+-static const struct net_device_ops dvb_netdev_ops = {
+- .ndo_open = dvb_net_open,
+- .ndo_stop = dvb_net_stop,
+- .ndo_start_xmit = dvb_net_tx,
+- .ndo_set_multicast_list = dvb_net_set_multicast_list,
+- .ndo_set_mac_address = dvb_net_set_mac,
+- .ndo_change_mtu = eth_change_mtu,
+- .ndo_validate_addr = eth_validate_addr,
+-};
+-
+ static void dvb_net_setup(struct net_device *dev)
+ {
+ ether_setup(dev);
+
+ dev->header_ops = &dvb_header_ops;
+- dev->netdev_ops = &dvb_netdev_ops;
++ dev->open = dvb_net_open;
++ dev->stop = dvb_net_stop;
++ dev->hard_start_xmit = dvb_net_tx;
++ dev->get_stats = dvb_net_get_stats;
++ dev->set_multicast_list = dvb_net_set_multicast_list;
++ dev->set_mac_address = dvb_net_set_mac;
+ dev->mtu = 4096;
+ dev->mc_count = 0;
+
diff --git a/freed-ora/current/F-12/linux-2.6-rfkill-all.patch b/freed-ora/current/F-12/linux-2.6-rfkill-all.patch
new file mode 100644
index 000000000..cd1db3e93
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-rfkill-all.patch
@@ -0,0 +1,52 @@
+diff --git a/include/linux/input.h b/include/linux/input.h
+index 8b3bc3e..20a622e 100644
+--- a/include/linux/input.h
++++ b/include/linux/input.h
+@@ -595,6 +595,8 @@ struct input_absinfo {
+ #define KEY_NUMERIC_STAR 0x20a
+ #define KEY_NUMERIC_POUND 0x20b
+
++#define KEY_RFKILL 0x20c /* Key that controls all radios */
++
+ /* We avoid low common keys in module aliases so they don't get huge. */
+ #define KEY_MIN_INTERESTING KEY_MUTE
+ #define KEY_MAX 0x2ff
+diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
+index 278777f..4c39f7e 100644
+--- a/include/linux/rfkill.h
++++ b/include/linux/rfkill.h
+@@ -32,7 +32,7 @@
+ /**
+ * enum rfkill_type - type of rfkill switch.
+ *
+- * @RFKILL_TYPE_ALL: toggles all switches (userspace only)
++ * @RFKILL_TYPE_ALL: toggles all switches (requests only - not a switch type)
+ * @RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device.
+ * @RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device.
+ * @RFKILL_TYPE_UWB: switch is on a ultra wideband device.
+diff --git a/net/rfkill/input.c b/net/rfkill/input.c
+index a7295ad..3713d7e 100644
+--- a/net/rfkill/input.c
++++ b/net/rfkill/input.c
+@@ -212,6 +212,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
+ case KEY_WIMAX:
+ rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
+ break;
++ case KEY_RFKILL:
++ rfkill_schedule_toggle(RFKILL_TYPE_ALL);
++ break;
+ }
+ } else if (type == EV_SW && code == SW_RFKILL_ALL)
+ rfkill_schedule_evsw_rfkillall(data);
+@@ -295,6 +298,11 @@ static const struct input_device_id rfkill_ids[] = {
+ .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
+ },
+ {
++ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
++ .evbit = { BIT_MASK(EV_KEY) },
++ .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
++ },
++ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
+ .evbit = { BIT(EV_SW) },
+ .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
diff --git a/freed-ora/current/F-12/linux-2.6-selinux-mprotect-checks.patch b/freed-ora/current/F-12/linux-2.6-selinux-mprotect-checks.patch
new file mode 100644
index 000000000..175252537
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-selinux-mprotect-checks.patch
@@ -0,0 +1,41 @@
+This needs a fixed toolchain and a userspace rebuild to work.
+For these reasons, it has had difficulty getting upstream.
+
+i.e., Fedora has a new enough toolchain and has been rebuilt, so we don't need
+the ifdefs. Other distros don't/haven't, and this patch would break them
+if pushed upstream.
+
+--- linux-2.6.26.noarch/security/selinux/hooks.c~ 2008-09-25 14:11:17.000000000 -0400
++++ linux-2.6.26.noarch/security/selinux/hooks.c 2008-09-25 14:12:17.000000000 -0400
+@@ -3018,7 +3018,6 @@ static int file_map_prot_check(struct fi
+ const struct cred *cred = current_cred();
+ int rc = 0;
+
+-#ifndef CONFIG_PPC32
+ if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
+ /*
+ * We are making executable an anonymous mapping or a
+@@ -3029,7 +3028,6 @@ static int file_map_prot_check(struct fi
+ if (rc)
+ goto error;
+ }
+-#endif
+
+ if (file) {
+ /* read access is always possible with a mapping */
+@@ -3024,7 +3022,6 @@ static int selinux_file_mprotect(struct
+ if (selinux_checkreqprot)
+ prot = reqprot;
+
+-#ifndef CONFIG_PPC32
+ if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+ rc = 0;
+ if (vma->vm_start >= vma->vm_mm->start_brk &&
+@@ -3049,7 +3046,6 @@ static int selinux_file_mprotect(struct
+ if (rc)
+ return rc;
+ }
+-#endif
+
+ return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
+ }
diff --git a/freed-ora/current/F-12/linux-2.6-serial-460800.patch b/freed-ora/current/F-12/linux-2.6-serial-460800.patch
new file mode 100644
index 000000000..17d67ef64
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-serial-460800.patch
@@ -0,0 +1,70 @@
+diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
+index 2209620..659c1bb 100644
+--- a/drivers/serial/8250.c
++++ b/drivers/serial/8250.c
+@@ -7,6 +7,9 @@
+ *
+ * Copyright (C) 2001 Russell King.
+ *
++ * 2005/09/16: Enabled higher baud rates for 16C95x.
++ * (Mathias Adam <a2@adamis.de>)
++ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+@@ -2227,6 +2230,14 @@ static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int
+ else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+ baud == (port->uartclk/8))
+ quot = 0x8002;
++ /*
++ * For 16C950s UART_TCR is used in combination with divisor==1
++ * to achieve baud rates up to baud_base*4.
++ */
++ else if ((port->type == PORT_16C950) &&
++ baud > (port->uartclk/16))
++ quot = 1;
++
+ else
+ quot = uart_get_divisor(port, baud);
+
+@@ -2240,7 +2251,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct uart_8250_port *up = (struct uart_8250_port *)port;
+ unsigned char cval, fcr = 0;
+ unsigned long flags;
+- unsigned int baud, quot;
++ unsigned int baud, quot, max_baud;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+@@ -2272,9 +2283,10 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
++ max_baud = (up->port.type == PORT_16C950 ? port->uartclk/4 : port->uartclk/16);
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / 16 / 0xffff,
+- port->uartclk / 16);
++ max_baud);
+ quot = serial8250_get_divisor(port, baud);
+
+ /*
+@@ -2311,6 +2323,19 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
++ * 16C950 supports additional prescaler ratios between 1:16 and 1:4
++ * thus increasing max baud rate to uartclk/4.
++ */
++ if (up->port.type == PORT_16C950) {
++ if (baud == port->uartclk/4)
++ serial_icr_write(up, UART_TCR, 0x4);
++ else if (baud == port->uartclk/8)
++ serial_icr_write(up, UART_TCR, 0x8);
++ else
++ serial_icr_write(up, UART_TCR, 0);
++ }
++
++ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
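With a 16C950 the change above reaches rates beyond baud_base by combining a divisor of 1 with the TCR prescaler (1:8 or 1:4 instead of the default 1:16). The standalone sketch below is illustrative only; the 7.3728 MHz clock is just an assumed example and the divisor calculation is a simplified mirror of serial8250_get_divisor()/serial8250_set_termios() as patched above.

    #include <stdio.h>

    int main(void)
    {
        unsigned int uartclk = 7372800;     /* assumed example clock, baud_base = 460800 */
        unsigned int bauds[] = { 460800, 921600, 1843200 };
        int i;

        for (i = 0; i < 3; i++) {
            unsigned int baud = bauds[i];
            unsigned int quot, tcr;

            /* simplified mirror of the 16C950 handling added above */
            quot = (baud > uartclk / 16) ? 1 : (uartclk / 16) / baud;
            if (baud == uartclk / 4)
                tcr = 0x4;      /* 1:4 prescaler */
            else if (baud == uartclk / 8)
                tcr = 0x8;      /* 1:8 prescaler */
            else
                tcr = 0;        /* default 1:16 */
            printf("baud %7u -> divisor %u, TCR 0x%x\n", baud, quot, tcr);
        }
        return 0;
    }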
diff --git a/freed-ora/current/F-12/linux-2.6-silence-acpi-blacklist.patch b/freed-ora/current/F-12/linux-2.6-silence-acpi-blacklist.patch
new file mode 100644
index 000000000..c5997bb6e
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-silence-acpi-blacklist.patch
@@ -0,0 +1,25 @@
+diff -up linux-2.6.26.noarch/drivers/acpi/blacklist.c.jx linux-2.6.26.noarch/drivers/acpi/blacklist.c
+--- linux-2.6.26.noarch/drivers/acpi/blacklist.c.jx 2008-07-13 17:51:29.000000000 -0400
++++ linux-2.6.26.noarch/drivers/acpi/blacklist.c 2008-08-12 14:21:39.000000000 -0400
+@@ -81,18 +81,18 @@ static int __init blacklist_by_year(void
+
+ /* Doesn't exist? Likely an old system */
+ if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
+- printk(KERN_ERR PREFIX "no DMI BIOS year, "
++ printk(KERN_INFO PREFIX "no DMI BIOS year, "
+ "acpi=force is required to enable ACPI\n" );
+ return 1;
+ }
+ /* 0? Likely a buggy new BIOS */
+ if (year == 0) {
+- printk(KERN_ERR PREFIX "DMI BIOS year==0, "
++ printk(KERN_INFO PREFIX "DMI BIOS year==0, "
+ "assuming ACPI-capable machine\n" );
+ return 0;
+ }
+ if (year < CONFIG_ACPI_BLACKLIST_YEAR) {
+- printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), "
++ printk(KERN_INFO PREFIX "BIOS age (%d) fails cutoff (%d), "
+ "acpi=force is required to enable ACPI\n",
+ year, CONFIG_ACPI_BLACKLIST_YEAR);
+ return 1;
diff --git a/freed-ora/current/F-12/linux-2.6-silence-fbcon-logo.patch b/freed-ora/current/F-12/linux-2.6-silence-fbcon-logo.patch
new file mode 100644
index 000000000..45ab73331
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-silence-fbcon-logo.patch
@@ -0,0 +1,42 @@
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 1657b96..4c5c2be 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -631,13 +631,15 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
+ kfree(save);
+ }
+
+- if (logo_lines > vc->vc_bottom) {
+- logo_shown = FBCON_LOGO_CANSHOW;
+- printk(KERN_INFO
+- "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
+- } else if (logo_shown != FBCON_LOGO_DONTSHOW) {
+- logo_shown = FBCON_LOGO_DRAW;
+- vc->vc_top = logo_lines;
++ if (logo_shown != FBCON_LOGO_DONTSHOW) {
++ if (logo_lines > vc->vc_bottom) {
++ logo_shown = FBCON_LOGO_CANSHOW;
++ printk(KERN_INFO
++ "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
++ } else {
++ logo_shown = FBCON_LOGO_DRAW;
++ vc->vc_top = logo_lines;
++ }
+ }
+ }
+ #endif /* MODULE */
+@@ -3489,6 +3491,14 @@ static int __init fb_console_init(void)
+ return 0;
+ }
+
++static int __init quiet_logo(char *str)
++{
++ logo_shown = FBCON_LOGO_DONTSHOW;
++ return 0;
++}
++
++early_param("quiet", quiet_logo);
++
+ module_init(fb_console_init);
+
+ #ifdef MODULE
diff --git a/freed-ora/current/F-12/linux-2.6-silence-noise.patch b/freed-ora/current/F-12/linux-2.6-silence-noise.patch
new file mode 100644
index 000000000..119a97769
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-silence-noise.patch
@@ -0,0 +1,66 @@
+--- linux-2.6.26.noarch/drivers/base/power/main.c~ 2008-08-22 20:57:57.000000000 -0400
++++ linux-2.6.26.noarch/drivers/base/power/main.c 2008-08-22 20:58:05.000000000 -0400
+@@ -69,9 +69,6 @@ void device_pm_unlock(void)
+ */
+ void device_pm_add(struct device *dev)
+ {
+- pr_debug("PM: Adding info for %s:%s\n",
+- dev->bus ? dev->bus->name : "No Bus",
+- kobject_name(&dev->kobj));
+ mutex_lock(&dpm_list_mtx);
+ if (dev->parent) {
+ if (dev->parent->power.status >= DPM_SUSPENDING)
+From b4e96f34c17e5a79cd28774cc722bb33e7e02c6e Mon Sep 17 00:00:00 2001
+From: Peter Jones <pjones@redhat.com>
+Date: Thu, 25 Sep 2008 16:23:33 -0400
+Subject: [PATCH] Don't print an error message just because there's no i8042 chip.
+
+Some systems, such as EFI-based Apple systems, won't necessarily have an
+i8042 to initialize. We shouldn't be printing an error message in this
+case, since not detecting the chip is the correct behavior.
+---
+ drivers/input/serio/i8042.c | 4 +---
+ 1 files changed, 1 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 170f71e..4f3e632 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -701,10 +701,8 @@ static int __devinit i8042_check_aux(void)
+
+ static int i8042_controller_check(void)
+ {
+- if (i8042_flush() == I8042_BUFFER_SIZE) {
+- printk(KERN_ERR "i8042.c: No controller found.\n");
++ if (i8042_flush() == I8042_BUFFER_SIZE)
+ return -ENODEV;
+- }
+
+ return 0;
+ }
+--
+1.6.0.1
+
+Socket fuzzers like sfuzz will trigger this printk a lot, even though it's
+ratelimited. It isn't particularly useful, so just remove it.
+
+Signed-off-by: Dave Jones <davej@redhat.com>
+
+--- linux-2.6.27.noarch/net/can/af_can.c~ 2008-12-11 16:53:48.000000000 -0500
++++ linux-2.6.27.noarch/net/can/af_can.c 2008-12-11 16:54:42.000000000 -0500
+@@ -134,13 +134,9 @@ static int can_create(struct net *net, s
+ err = request_module("can-proto-%d", protocol);
+
+ /*
+- * In case of error we only print a message but don't
+- * return the error code immediately. Below we will
+- * return -EPROTONOSUPPORT
++ * In case of error we don't return the error code immediately.
++ * Below we will return -EPROTONOSUPPORT
+ */
+- if (err && printk_ratelimit())
+- printk(KERN_ERR "can: request_module "
+- "(can-proto-%d) failed.\n", protocol);
+ }
+ #endif
+
diff --git a/freed-ora/current/F-12/linux-2.6-sparc-selinux-mprotect-checks.patch b/freed-ora/current/F-12/linux-2.6-sparc-selinux-mprotect-checks.patch
new file mode 100644
index 000000000..30d3689c9
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-sparc-selinux-mprotect-checks.patch
@@ -0,0 +1,21 @@
+diff -up linux-2.6.32.noarch/security/selinux/hooks.c.mprotect-sparc linux-2.6.32.noarch/security/selinux/hooks.c
+--- linux-2.6.32.noarch/security/selinux/hooks.c.mprotect-sparc 2010-03-10 08:28:20.957571926 -0500
++++ linux-2.6.32.noarch/security/selinux/hooks.c 2010-03-10 08:29:15.732698763 -0500
+@@ -3010,7 +3010,7 @@ static int file_map_prot_check(struct fi
+ const struct cred *cred = current_cred();
+ int rc = 0;
+
+-#ifndef CONFIG_PPC32
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_SPARC)
+ if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
+ /*
+ * We are making executable an anonymous mapping or a
+@@ -3082,7 +3082,7 @@ static int selinux_file_mprotect(struct
+ if (selinux_checkreqprot)
+ prot = reqprot;
+
+-#ifndef CONFIG_PPC32
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_SPARC)
+ if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+ int rc = 0;
+ if (vma->vm_start >= vma->vm_mm->start_brk &&
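For context, the hooks patched here fire when a task maps or mprotects memory as executable, for example an anonymous writable+executable mapping. The hedged userspace sketch below shows the kind of request the check covers; nothing in it comes from the patch, and whether it is denied depends on the loaded SELinux policy.

/* Illustrative only: request an anonymous writable+executable mapping,
 * which is the class of request the SELinux execmem check applies to.
 * On PPC32 (and, with this patch, SPARC) that check is skipped. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");         /* may be an SELinux execmem denial */
		return 1;
	}
	puts("got a writable+executable anonymous mapping");
	munmap(p, 4096);
	return 0;
}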
diff --git a/freed-ora/current/F-12/linux-2.6-tracehook.patch b/freed-ora/current/F-12/linux-2.6-tracehook.patch
new file mode 100644
index 000000000..7f6c05ae3
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-tracehook.patch
@@ -0,0 +1,368 @@
+From: Oleg Nesterov <oleg@redhat.com>
+
+[PATCH] signals: check ->group_stop_count after tracehook_get_signal()
+
+Move the call to do_signal_stop() down, after tracehook call.
+This makes ->group_stop_count condition visible to tracers before
+do_signal_stop() will participate in this group-stop.
+
+Currently the patch has no effect, tracehook_get_signal() always
+returns 0.
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Roland McGrath <roland@redhat.com>
+---
+ arch/powerpc/include/asm/ptrace.h | 2 +
+ arch/powerpc/kernel/traps.c | 9 ++++++
+ arch/s390/kernel/traps.c | 6 ++--
+ arch/x86/include/asm/ptrace.h | 2 +
+ arch/x86/kernel/ptrace.c | 51 ++++++++++++++++++++----------------
+ include/linux/ptrace.h | 24 +++++++++++------
+ include/linux/sched.h | 1 +
+ include/linux/tracehook.h | 15 ++++++++---
+ kernel/ptrace.c | 2 +-
+ kernel/signal.c | 13 ++++-----
+ 10 files changed, 79 insertions(+), 46 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index 8c34149..cbd759e 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -140,6 +140,8 @@ extern void user_enable_single_step(stru
+ extern void user_enable_block_step(struct task_struct *);
+ extern void user_disable_single_step(struct task_struct *);
+
++#define ARCH_HAS_USER_SINGLE_STEP_INFO
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 6f0ae1a..83b57ac 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -174,6 +174,15 @@ int die(const char *str, struct pt_regs
+ return 0;
+ }
+
++void user_single_step_siginfo(struct task_struct *tsk,
++ struct pt_regs *regs, siginfo_t *info)
++{
++ memset(info, 0, sizeof(*info));
++ info->si_signo = SIGTRAP;
++ info->si_code = TRAP_TRACE;
++ info->si_addr = (void __user *)regs->nip;
++}
++
+ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
+ {
+ siginfo_t info;
+diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
+index c2e42cc..6e7ad63 100644
+--- a/arch/s390/kernel/traps.c
++++ b/arch/s390/kernel/traps.c
+@@ -18,7 +18,7 @@
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+-#include <linux/ptrace.h>
++#include <linux/tracehook.h>
+ #include <linux/timer.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+@@ -382,7 +382,7 @@ void __kprobes do_single_step(struct pt_
+ SIGTRAP) == NOTIFY_STOP){
+ return;
+ }
+- if ((current->ptrace & PT_PTRACED) != 0)
++ if (tracehook_consider_fatal_signal(current, SIGTRAP))
+ force_sig(SIGTRAP, current);
+ }
+
+@@ -483,7 +483,7 @@ static void illegal_op(struct pt_regs *
+ if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
+ return;
+ if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
+- if (current->ptrace & PT_PTRACED)
++ if (tracehook_consider_fatal_signal(current, SIGTRAP))
+ force_sig(SIGTRAP, current);
+ else
+ signal = SIGILL;
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 0f0d908..7a88a82 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -230,6 +230,8 @@ extern void user_enable_block_step(struc
+ #define arch_has_block_step() (boot_cpu_data.x86 >= 6)
+ #endif
+
++#define ARCH_HAS_USER_SINGLE_STEP_INFO
++
+ struct user_desc;
+ extern int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *info);
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 7b058a2..ea35dee 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -1437,21 +1437,33 @@ const struct user_regset_view *task_user
+ #endif
+ }
+
+-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+- int error_code, int si_code)
++static void fill_sigtrap_info(struct task_struct *tsk,
++ struct pt_regs *regs,
++ int error_code, int si_code,
++ struct siginfo *info)
+ {
+- struct siginfo info;
+-
+ tsk->thread.trap_no = 1;
+ tsk->thread.error_code = error_code;
+
+- memset(&info, 0, sizeof(info));
+- info.si_signo = SIGTRAP;
+- info.si_code = si_code;
++ memset(info, 0, sizeof(*info));
++ info->si_signo = SIGTRAP;
++ info->si_code = si_code;
++ info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
++}
+
+- /* User-mode ip? */
+- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
++void user_single_step_siginfo(struct task_struct *tsk,
++ struct pt_regs *regs,
++ struct siginfo *info)
++{
++ fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
++}
++
++void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
++ int error_code, int si_code)
++{
++ struct siginfo info;
+
++ fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
+ /* Send us the fake SIGTRAP */
+ force_sig_info(SIGTRAP, &info, tsk);
+ }
+@@ -1516,29 +1528,22 @@ asmregparm long syscall_trace_enter(stru
+
+ asmregparm void syscall_trace_leave(struct pt_regs *regs)
+ {
++ bool step;
++
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->ax);
+
+- if (test_thread_flag(TIF_SYSCALL_TRACE))
+- tracehook_report_syscall_exit(regs, 0);
+-
+ /*
+ * If TIF_SYSCALL_EMU is set, we only get here because of
+ * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
+ * We already reported this syscall instruction in
+- * syscall_trace_enter(), so don't do any more now.
+- */
+- if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
+- return;
+-
+- /*
+- * If we are single-stepping, synthesize a trap to follow the
+- * system call instruction.
++ * syscall_trace_enter().
+ */
+- if (test_thread_flag(TIF_SINGLESTEP) &&
+- tracehook_consider_fatal_signal(current, SIGTRAP))
+- send_sigtrap(current, regs, 0, TRAP_BRKPT);
++ step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
++ !test_thread_flag(TIF_SYSCALL_EMU);
++ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall_exit(regs, step);
+ }
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 7456d7d..4802e2a 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -85,6 +85,7 @@ extern int ptrace_traceme(void);
+ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
+ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
+ extern int ptrace_attach(struct task_struct *tsk);
++extern bool __ptrace_detach(struct task_struct *tracer, struct task_struct *tracee);
+ extern int ptrace_detach(struct task_struct *, unsigned int);
+ extern void ptrace_disable(struct task_struct *);
+ extern int ptrace_check_attach(struct task_struct *task, int kill);
+@@ -105,12 +106,7 @@ static inline int ptrace_reparented(stru
+ {
+ return child->real_parent != child->parent;
+ }
+-static inline void ptrace_link(struct task_struct *child,
+- struct task_struct *new_parent)
+-{
+- if (unlikely(child->ptrace))
+- __ptrace_link(child, new_parent);
+-}
++
+ static inline void ptrace_unlink(struct task_struct *child)
+ {
+ if (unlikely(child->ptrace))
+@@ -169,9 +165,9 @@ static inline void ptrace_init_task(stru
+ INIT_LIST_HEAD(&child->ptraced);
+ child->parent = child->real_parent;
+ child->ptrace = 0;
+- if (unlikely(ptrace)) {
++ if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
+ child->ptrace = current->ptrace;
+- ptrace_link(child, current->parent);
++ __ptrace_link(child, current->parent);
+ }
+ }
+
+@@ -278,6 +274,18 @@ static inline void user_enable_block_ste
+ }
+ #endif /* arch_has_block_step */
+
++#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
++extern void user_single_step_siginfo(struct task_struct *tsk,
++ struct pt_regs *regs, siginfo_t *info);
++#else
++static inline void user_single_step_siginfo(struct task_struct *tsk,
++ struct pt_regs *regs, siginfo_t *info)
++{
++ memset(info, 0, sizeof(*info));
++ info->si_signo = SIGTRAP;
++}
++#endif
++
+ #ifndef arch_ptrace_stop_needed
+ /**
+ * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 75e6e60..6c8928b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2060,6 +2060,7 @@ extern int kill_pgrp(struct pid *pid, in
+ extern int kill_pid(struct pid *pid, int sig, int priv);
+ extern int kill_proc_info(int, struct siginfo *, pid_t);
+ extern int do_notify_parent(struct task_struct *, int);
++extern void do_notify_parent_cldstop(struct task_struct *, int);
+ extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
+ extern void force_sig(int, struct task_struct *);
+ extern void force_sig_specific(int, struct task_struct *);
+diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
+index 1eb44a9..c78b2f4 100644
+--- a/include/linux/tracehook.h
++++ b/include/linux/tracehook.h
+@@ -134,6 +134,13 @@ static inline __must_check int tracehook
+ */
+ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
+ {
++ if (step && (task_ptrace(current) & PT_PTRACED)) {
++ siginfo_t info;
++ user_single_step_siginfo(current, regs, &info);
++ force_sig_info(SIGTRAP, &info, current);
++ return;
++ }
++
+ ptrace_report_syscall(regs);
+ }
+
+@@ -149,7 +156,7 @@ static inline int tracehook_unsafe_exec(
+ {
+ int unsafe = 0;
+ int ptrace = task_ptrace(task);
+- if (ptrace & PT_PTRACED) {
++ if (ptrace) {
+ if (ptrace & PT_PTRACE_CAP)
+ unsafe |= LSM_UNSAFE_PTRACE_CAP;
+ else
+@@ -171,7 +178,7 @@ static inline int tracehook_unsafe_exec(
+ */
+ static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
+ {
+- if (task_ptrace(tsk) & PT_PTRACED)
++ if (task_ptrace(tsk))
+ return rcu_dereference(tsk->parent);
+ return NULL;
+ }
+@@ -379,7 +386,7 @@ static inline void tracehook_signal_hand
+ const struct k_sigaction *ka,
+ struct pt_regs *regs, int stepping)
+ {
+- if (stepping)
++ if (stepping && (task_ptrace(current) & PT_PTRACED))
+ ptrace_notify(SIGTRAP);
+ }
+
+@@ -485,7 +492,7 @@ static inline int tracehook_get_signal(s
+ */
+ static inline int tracehook_notify_jctl(int notify, int why)
+ {
+- return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
++ return notify ?: task_ptrace(current) ? why : 0;
+ }
+
+ /**
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 23bd09c..b7c1d32 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -271,7 +271,7 @@ static int ignoring_children(struct sigh
+ * reap it now, in that case we must also wake up sub-threads sleeping in
+ * do_wait().
+ */
+-static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
++bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+ {
+ __ptrace_unlink(p);
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 6705320..9908335 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1461,7 +1461,7 @@ int do_notify_parent(struct task_struct
+ return ret;
+ }
+
+-static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
++void do_notify_parent_cldstop(struct task_struct *tsk, int why)
+ {
+ struct siginfo info;
+ unsigned long flags;
+@@ -1731,7 +1731,7 @@ static int do_signal_stop(int signr)
+ static int ptrace_signal(int signr, siginfo_t *info,
+ struct pt_regs *regs, void *cookie)
+ {
+- if (!task_ptrace(current))
++ if (!(task_ptrace(current) & PT_PTRACED))
+ return signr;
+
+ ptrace_signal_deliver(regs, cookie);
+@@ -1807,11 +1807,6 @@ relock:
+
+ for (;;) {
+ struct k_sigaction *ka;
+-
+- if (unlikely(signal->group_stop_count > 0) &&
+- do_signal_stop(0))
+- goto relock;
+-
+ /*
+ * Tracing can induce an artifical signal and choose sigaction.
+ * The return value in @signr determines the default action,
+@@ -1823,6 +1818,10 @@ relock:
+ if (unlikely(signr != 0))
+ ka = return_ka;
+ else {
++ if (unlikely(signal->group_stop_count > 0) &&
++ do_signal_stop(0))
++ goto relock;
++
+ signr = dequeue_signal(current, &current->blocked,
+ info);
+
diff --git a/freed-ora/current/F-12/linux-2.6-upstream-reverts.patch b/freed-ora/current/F-12/linux-2.6-upstream-reverts.patch
new file mode 100644
index 000000000..81f7983a4
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-upstream-reverts.patch
@@ -0,0 +1,2436 @@
+From 7b9c5abee98c54f85bcc04bd4d7ec8d5094c73f4 Mon Sep 17 00:00:00 2001
+From: Jesse Barnes <jbarnes@virtuousgeek.org>
+Date: Fri, 12 Feb 2010 09:30:00 -0800
+Subject: drm/i915: give up on 8xx lid status
+
+From: Jesse Barnes <jbarnes@virtuousgeek.org>
+
+commit 7b9c5abee98c54f85bcc04bd4d7ec8d5094c73f4 upstream.
+
+These old machines more often than not lie about their lid state. So
+don't use it to detect LVDS presence, but leave the event handler to
+deal with lid open/close, when we might need to reset the mode.
+
+Fixes kernel bug #15248
+
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_lvds.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -648,8 +648,12 @@ static const struct dmi_system_id bad_li
+ */
+ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+ {
++ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status = connector_status_connected;
+
++ if (IS_I8XX(dev))
++ return connector_status_connected;
++
+ if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ status = connector_status_disconnected;
+
+From 6363ee6f496eb7e3b3f78dc105e522c7b496089b Mon Sep 17 00:00:00 2001
+From: Zhao Yakui <yakui.zhao@intel.com>
+Date: Tue, 24 Nov 2009 09:48:44 +0800
+Subject: drm/i915: parse child device from VBT
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+commit 6363ee6f496eb7e3b3f78dc105e522c7b496089b upstream.
+
+On some laptops there is no HDMI/DP. But xrandr still reports
+several disconnected HDMI/display ports. In such a case the user will be
+confused.
+ >DVI1 disconnected (normal left inverted right x axis y axis)
+ >DP1 disconnected (normal left inverted right x axis y axis)
+ >DVI2 disconnected (normal left inverted right x axis y axis)
+ >DP2 disconnected (normal left inverted right x axis y axis)
+ >DP3 disconnected (normal left inverted right x axis y axis)
+
+This patch set uses the child devices parsed from the VBT to decide whether
+the HDMI/DP/LVDS/TV outputs should be initialized.
+
+Parse the child device from VBT.
+
+The device class type is also added for LFP, TV, HDMI, DP output.
+
+https://bugs.freedesktop.org/show_bug.cgi?id=22785
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Reviewed-by: Adam Jackson <ajax@redhat.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_dma.c | 9 +++++
+ drivers/gpu/drm/i915/i915_drv.h | 2 +
+ drivers/gpu/drm/i915/intel_bios.c | 65 ++++++++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/intel_bios.h | 17 +++++++++
+ 4 files changed, 93 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1526,6 +1526,15 @@ int i915_driver_unload(struct drm_device
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++ /*
++ * free the memory space allocated for the child device
++ * config parsed from VBT
++ */
++ if (dev_priv->child_dev && dev_priv->child_dev_num) {
++ kfree(dev_priv->child_dev);
++ dev_priv->child_dev = NULL;
++ dev_priv->child_dev_num = 0;
++ }
+ drm_irq_uninstall(dev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ }
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -555,6 +555,8 @@ typedef struct drm_i915_private {
+ struct timer_list idle_timer;
+ bool busy;
+ u16 orig_clock;
++ int child_dev_num;
++ struct child_device_config *child_dev;
+ struct drm_connector *int_lvds_connector;
+ } drm_i915_private_t;
+
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -362,6 +362,70 @@ parse_driver_features(struct drm_i915_pr
+ dev_priv->render_reclock_avail = true;
+ }
+
++static void
++parse_device_mapping(struct drm_i915_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_general_definitions *p_defs;
++ struct child_device_config *p_child, *child_dev_ptr;
++ int i, child_device_num, count;
++ u16 block_size;
++
++ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
++ if (!p_defs) {
++ DRM_DEBUG_KMS("No general definition block is found\n");
++ return;
++ }
++ /* judge whether the size of child device meets the requirements.
++ * If the child device size obtained from general definition block
++ * is different with sizeof(struct child_device_config), skip the
++ * parsing of sdvo device info
++ */
++ if (p_defs->child_dev_size != sizeof(*p_child)) {
++ /* different child dev size . Ignore it */
++ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
++ return;
++ }
++ /* get the block size of general definitions */
++ block_size = get_blocksize(p_defs);
++ /* get the number of child device */
++ child_device_num = (block_size - sizeof(*p_defs)) /
++ sizeof(*p_child);
++ count = 0;
++ /* get the number of child device that is present */
++ for (i = 0; i < child_device_num; i++) {
++ p_child = &(p_defs->devices[i]);
++ if (!p_child->device_type) {
++ /* skip the device block if device type is invalid */
++ continue;
++ }
++ count++;
++ }
++ if (!count) {
++ DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
++ return;
++ }
++ dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
++ if (!dev_priv->child_dev) {
++ DRM_DEBUG_KMS("No memory space for child device\n");
++ return;
++ }
++
++ dev_priv->child_dev_num = count;
++ count = 0;
++ for (i = 0; i < child_device_num; i++) {
++ p_child = &(p_defs->devices[i]);
++ if (!p_child->device_type) {
++ /* skip the device block if device type is invalid */
++ continue;
++ }
++ child_dev_ptr = dev_priv->child_dev + count;
++ count++;
++ memcpy((void *)child_dev_ptr, (void *)p_child,
++ sizeof(*p_child));
++ }
++ return;
++}
+ /**
+ * intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+@@ -413,6 +477,7 @@ intel_init_bios(struct drm_device *dev)
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
++ parse_device_mapping(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
+
+ pci_unmap_rom(pdev, bios);
+--- a/drivers/gpu/drm/i915/intel_bios.h
++++ b/drivers/gpu/drm/i915/intel_bios.h
+@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *
+ #define SWF14_APM_STANDBY 0x1
+ #define SWF14_APM_RESTORE 0x0
+
++/* Add the device class for LFP, TV, HDMI */
++#define DEVICE_TYPE_INT_LFP 0x1022
++#define DEVICE_TYPE_INT_TV 0x1009
++#define DEVICE_TYPE_HDMI 0x60D2
++#define DEVICE_TYPE_DP 0x68C6
++#define DEVICE_TYPE_eDP 0x78C6
++
++/* define the DVO port for HDMI output type */
++#define DVO_B 1
++#define DVO_C 2
++#define DVO_D 3
++
++/* define the PORT for DP output type */
++#define PORT_IDPB 7
++#define PORT_IDPC 8
++#define PORT_IDPD 9
++
+ #endif /* _I830_BIOS_H_ */
+From 38b3037ee47fbd65a36bc7c39f60a900fbbe3b8e Mon Sep 17 00:00:00 2001
+From: Adam Jackson <ajax@redhat.com>
+Date: Tue, 24 Nov 2009 10:07:00 -0500
+Subject: drm/i915: Fix LVDS presence check
+
+Combined patches from 2.6.33 for fixing LVDS detection.
+7cf4f69d3f4511f443473954456cb91d5514756d
+ drm/i915: Don't set up the LVDS if it isn't in the BIOS device table.
+38b3037ee47fbd65a36bc7c39f60a900fbbe3b8e
+ drm/i915: Fix LVDS presence check
+6e36595a2131e7ed5ee2674be54b2713ba7f0490
+ drm/i915: Declare the new VBT parsing functions as static
+11ba159288f1bfc1a475c994e598f5fe423fde9d
+ drm/i915: Don't check for lid presence when detecting LVDS
+
+Acked-by: Takashi Iwai <tiwai@suse.de>
+Cc: Matthew Garrett <mjg@redhat.com>
+Cc: Adam Jackson <ajax@redhat.com>
+Cc: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_lvds.c | 90 +++++++++++++-------------------------
+ 1 file changed, 33 insertions(+), 57 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -901,64 +901,45 @@ static const struct dmi_system_id intel_
+ { } /* terminating entry */
+ };
+
+-#ifdef CONFIG_ACPI
+ /*
+- * check_lid_device -- check whether @handle is an ACPI LID device.
+- * @handle: ACPI device handle
+- * @level : depth in the ACPI namespace tree
+- * @context: the number of LID device when we find the device
+- * @rv: a return value to fill if desired (Not use)
++ * Enumerate the child dev array parsed from VBT to check whether
++ * the LVDS is present.
++ * If it is present, return 1.
++ * If it is not present, return false.
++ * If no child dev is parsed from VBT, it assumes that the LVDS is present.
++ * Note: The addin_offset should also be checked for LVDS panel.
++ * Only when it is non-zero, it is assumed that it is present.
+ */
+-static acpi_status
+-check_lid_device(acpi_handle handle, u32 level, void *context,
+- void **return_value)
++static int lvds_is_present_in_vbt(struct drm_device *dev)
+ {
+- struct acpi_device *acpi_dev;
+- int *lid_present = context;
+-
+- acpi_dev = NULL;
+- /* Get the acpi device for device handle */
+- if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
+- /* If there is no ACPI device for handle, return */
+- return AE_OK;
+- }
+-
+- if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
+- *lid_present = 1;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct child_device_config *p_child;
++ int i, ret;
+
+- return AE_OK;
+-}
++ if (!dev_priv->child_dev_num)
++ return 1;
+
+-/**
+- * check whether there exists the ACPI LID device by enumerating the ACPI
+- * device tree.
+- */
+-static int intel_lid_present(void)
+-{
+- int lid_present = 0;
++ ret = 0;
++ for (i = 0; i < dev_priv->child_dev_num; i++) {
++ p_child = dev_priv->child_dev + i;
++ /*
++ * If the device type is not LFP, continue.
++ * If the device type is 0x22, it is also regarded as LFP.
++ */
++ if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
++ p_child->device_type != DEVICE_TYPE_LFP)
++ continue;
+
+- if (acpi_disabled) {
+- /* If ACPI is disabled, there is no ACPI device tree to
+- * check, so assume the LID device would have been present.
++ /* The addin_offset should be checked. Only when it is
++ * non-zero, it is regarded as present.
+ */
+- return 1;
++ if (p_child->addin_offset) {
++ ret = 1;
++ break;
++ }
+ }
+-
+- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+- ACPI_UINT32_MAX,
+- check_lid_device, &lid_present, NULL);
+-
+- return lid_present;
+-}
+-#else
+-static int intel_lid_present(void)
+-{
+- /* In the absence of ACPI built in, assume that the LID device would
+- * have been present.
+- */
+- return 1;
++ return ret;
+ }
+-#endif
+
+ /**
+ * intel_lvds_init - setup LVDS connectors on this device
+@@ -983,15 +964,10 @@ void intel_lvds_init(struct drm_device *
+ if (dmi_check_system(intel_no_lvds))
+ return;
+
+- /* Assume that any device without an ACPI LID device also doesn't
+- * have an integrated LVDS. We would be better off parsing the BIOS
+- * to get a reliable indicator, but that code isn't written yet.
+- *
+- * In the case of all-in-one desktops using LVDS that we've seen,
+- * they're using SDVO LVDS.
+- */
+- if (!intel_lid_present())
++ if (!lvds_is_present_in_vbt(dev)) {
++ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
++ }
+
+ if (IS_IGDNG(dev)) {
+ if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+From 944001201ca0196bcdb088129e5866a9f379d08c Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Tue, 20 Jul 2010 13:15:31 +1000
+Subject: drm/i915: enable low power render writes on GEN3 hardware.
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 944001201ca0196bcdb088129e5866a9f379d08c upstream.
+
+A lot of 945GMs have had stability issues for a long time; this manifested as X hangs, blitter engine hangs, and lots of crashes.
+
+one such report is at:
+https://bugs.freedesktop.org/show_bug.cgi?id=20560
+
+along with numerous distro bugzillas.
+
+This only took a week of digging and hair ripping to figure out.
+
+Tracked down and tested on a 945GM Lenovo T60,
+previously running
+x11perf -copypixwin500
+or
+x11perf -copywinpix500
+repeatedly would cause the GPU to wedge within 4 or 5 tries, with random busy bits set.
+
+After this patch no hangs were observed.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_gem.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -4697,6 +4697,16 @@ i915_gem_load(struct drm_device *dev)
+ list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ spin_unlock(&shrink_list_lock);
+
++ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
++ if (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
++ u32 tmp = I915_READ(MI_ARB_STATE);
++ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
++ /* arb state is a masked write, so set bit + bit in mask */
++ tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
++ I915_WRITE(MI_ARB_STATE, tmp);
++ }
++ }
++
+ /* Old X drivers will take 0-2 for front, back, depth buffers */
+ dev_priv->fence_reg_start = 3;
+
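The "masked write" mentioned in the comment above is a register convention worth spelling out: the upper 16 bits of the value written select which of the lower 16 bits actually change, so no read-modify-write is needed. The sketch below models that behaviour in plain C; the helper name is made up and the register is simulated, not real hardware access.

/* Model of an i915-style masked register: bits 31:16 are a write-enable
 * mask for bits 15:0, so only the selected bits take the new value. */
#include <stdio.h>
#include <stdint.h>

static uint32_t masked_write(uint32_t current, uint32_t value)
{
	uint32_t mask = value >> 16;            /* which low bits to update */
	uint32_t bits = value & 0xffff;         /* their new values */

	return (current & ~mask) | (bits & mask);
}

int main(void)
{
	uint32_t reg = 0;
	uint32_t c3_lp_write_enable = 1u << 11; /* MI_ARB_C3_LP_WRITE_ENABLE */

	/* "set bit + bit in mask": enable bit 11, leave the rest untouched */
	reg = masked_write(reg, c3_lp_write_enable | (c3_lp_write_enable << 16));
	printf("reg = 0x%04x\n", reg);          /* prints 0x0800 */
	return 0;
}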
+From 45503ded966c98e604c9667c0b458d40666b9ef3 Mon Sep 17 00:00:00 2001
+From: Keith Packard <keithp@keithp.com>
+Date: Mon, 19 Jul 2010 21:12:35 -0700
+Subject: drm/i915: Define MI_ARB_STATE bits
+
+From: Keith Packard <keithp@keithp.com>
+
+commit 45503ded966c98e604c9667c0b458d40666b9ef3 upstream.
+
+The i915 memory arbiter has a register full of configuration
+bits which are currently not defined in the driver header file.
+
+Signed-off-by: Keith Packard <keithp@keithp.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_reg.h | 64 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 64 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -307,6 +307,70 @@
+ #define LM_BURST_LENGTH 0x00000700
+ #define LM_FIFO_WATERMARK 0x0000001F
+ #define MI_ARB_STATE 0x020e4 /* 915+ only */
++#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
++
++/* Make render/texture TLB fetches lower priorty than associated data
++ * fetches. This is not turned on by default
++ */
++#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
++
++/* Isoch request wait on GTT enable (Display A/B/C streams).
++ * Make isoch requests stall on the TLB update. May cause
++ * display underruns (test mode only)
++ */
++#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
++
++/* Block grant count for isoch requests when block count is
++ * set to a finite value.
++ */
++#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
++#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
++#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
++#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
++#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
++
++/* Enable render writes to complete in C2/C3/C4 power states.
++ * If this isn't enabled, render writes are prevented in low
++ * power states. That seems bad to me.
++ */
++#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
++
++/* This acknowledges an async flip immediately instead
++ * of waiting for 2TLB fetches.
++ */
++#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
++
++/* Enables non-sequential data reads through arbiter
++ */
++#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
++
++/* Disable FSB snooping of cacheable write cycles from binner/render
++ * command stream
++ */
++#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
++
++/* Arbiter time slice for non-isoch streams */
++#define MI_ARB_TIME_SLICE_MASK (7 << 5)
++#define MI_ARB_TIME_SLICE_1 (0 << 5)
++#define MI_ARB_TIME_SLICE_2 (1 << 5)
++#define MI_ARB_TIME_SLICE_4 (2 << 5)
++#define MI_ARB_TIME_SLICE_6 (3 << 5)
++#define MI_ARB_TIME_SLICE_8 (4 << 5)
++#define MI_ARB_TIME_SLICE_10 (5 << 5)
++#define MI_ARB_TIME_SLICE_14 (6 << 5)
++#define MI_ARB_TIME_SLICE_16 (7 << 5)
++
++/* Low priority grace period page size */
++#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
++#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
++
++/* Disable display A/B trickle feed */
++#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
++
++/* Set display plane priority */
++#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
++#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
++
+ #define CACHE_MODE_0 0x02120 /* 915+ only */
+ #define CM0_MASK_SHIFT 16
+ #define CM0_IZ_OPT_DISABLE (1<<6)
+From 0725e95ea56698774e893edb7e7276b1d6890954 Mon Sep 17 00:00:00 2001
+From: Bernhard Rosenkraenzer <br@blankpage.ch>
+Date: Wed, 10 Mar 2010 12:36:43 +0100
+Subject: USB: qcserial: add new device ids
+
+From: Bernhard Rosenkraenzer <br@blankpage.ch>
+
+commit 0725e95ea56698774e893edb7e7276b1d6890954 upstream.
+
+This patch adds various USB device IDs for Gobi 2000 devices, as found in the
+drivers available at https://www.codeaurora.org/wiki/GOBI_Releases
+
+Signed-off-by: Bernhard Rosenkraenzer <bero@arklinux.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/serial/qcserial.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -47,6 +47,35 @@ static struct usb_device_id id_table[] =
+ {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
+ {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
+ {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
++ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
++ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
++ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
++ {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
++ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
++ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
++ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
++ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
++ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
++ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
++ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
++ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
++ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
++ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
++ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
++ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
++ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
++ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
++ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+From 9cf00977da092096c7a983276dad8b3002d23a99 Mon Sep 17 00:00:00 2001
+From: Adam Jackson <ajax@redhat.com>
+Date: Thu, 3 Dec 2009 17:44:36 -0500
+Subject: drm/edid: Unify detailed block parsing between base and extension blocks
+
+From: Adam Jackson <ajax@redhat.com>
+
+commit 9cf00977da092096c7a983276dad8b3002d23a99 upstream.
+
+Also fix an embarrassing bug in standard timing subblock parsing that
+would result in an infinite loop.
+
+Signed-off-by: Adam Jackson <ajax@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Cc: maximilian attems <max@stro.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/drm_edid.c | 163 ++++++++++++++++-----------------------------
+ 1 file changed, 61 insertions(+), 102 deletions(-)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -834,8 +834,57 @@ static int add_standard_modes(struct drm
+ return modes;
+ }
+
++static int add_detailed_modes(struct drm_connector *connector,
++ struct detailed_timing *timing,
++ struct edid *edid, u32 quirks, int preferred)
++{
++ int i, modes = 0;
++ struct detailed_non_pixel *data = &timing->data.other_data;
++ int timing_level = standard_timing_level(edid);
++ struct drm_display_mode *newmode;
++ struct drm_device *dev = connector->dev;
++
++ if (timing->pixel_clock) {
++ newmode = drm_mode_detailed(dev, edid, timing, quirks);
++ if (!newmode)
++ return 0;
++
++ if (preferred)
++ newmode->type |= DRM_MODE_TYPE_PREFERRED;
++
++ drm_mode_probed_add(connector, newmode);
++ return 1;
++ }
++
++ /* other timing types */
++ switch (data->type) {
++ case EDID_DETAIL_MONITOR_RANGE:
++ /* Get monitor range data */
++ break;
++ case EDID_DETAIL_STD_MODES:
++ /* Six modes per detailed section */
++ for (i = 0; i < 6; i++) {
++ struct std_timing *std;
++ struct drm_display_mode *newmode;
++
++ std = &data->data.timings[i];
++ newmode = drm_mode_std(dev, std, edid->revision,
++ timing_level);
++ if (newmode) {
++ drm_mode_probed_add(connector, newmode);
++ modes++;
++ }
++ }
++ break;
++ default:
++ break;
++ }
++
++ return modes;
++}
++
+ /**
+- * add_detailed_modes - get detailed mode info from EDID data
++ * add_detailed_info - get detailed mode info from EDID data
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+@@ -846,67 +895,24 @@ static int add_standard_modes(struct drm
+ static int add_detailed_info(struct drm_connector *connector,
+ struct edid *edid, u32 quirks)
+ {
+- struct drm_device *dev = connector->dev;
+- int i, j, modes = 0;
+- int timing_level;
+-
+- timing_level = standard_timing_level(edid);
++ int i, modes = 0;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
+ struct detailed_timing *timing = &edid->detailed_timings[i];
+- struct detailed_non_pixel *data = &timing->data.other_data;
+- struct drm_display_mode *newmode;
++ int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+- /* X server check is version 1.1 or higher */
+- if (edid->version == 1 && edid->revision >= 1 &&
+- !timing->pixel_clock) {
+- /* Other timing or info */
+- switch (data->type) {
+- case EDID_DETAIL_MONITOR_SERIAL:
+- break;
+- case EDID_DETAIL_MONITOR_STRING:
+- break;
+- case EDID_DETAIL_MONITOR_RANGE:
+- /* Get monitor range data */
+- break;
+- case EDID_DETAIL_MONITOR_NAME:
+- break;
+- case EDID_DETAIL_MONITOR_CPDATA:
+- break;
+- case EDID_DETAIL_STD_MODES:
+- for (j = 0; j < 6; i++) {
+- struct std_timing *std;
+- struct drm_display_mode *newmode;
+-
+- std = &data->data.timings[j];
+- newmode = drm_mode_std(dev, std,
+- edid->revision,
+- timing_level);
+- if (newmode) {
+- drm_mode_probed_add(connector, newmode);
+- modes++;
+- }
+- }
+- break;
+- default:
+- break;
+- }
+- } else {
+- newmode = drm_mode_detailed(dev, edid, timing, quirks);
+- if (!newmode)
+- continue;
+-
+- /* First detailed mode is preferred */
+- if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
+- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+- drm_mode_probed_add(connector, newmode);
++ /* In 1.0, only timings are allowed */
++ if (!timing->pixel_clock && edid->version == 1 &&
++ edid->revision == 0)
++ continue;
+
+- modes++;
+- }
++ modes += add_detailed_modes(connector, timing, edid, quirks,
++ preferred);
+ }
+
+ return modes;
+ }
++
+ /**
+ * add_detailed_mode_eedid - get detailed mode info from addtional timing
+ * EDID block
+@@ -920,12 +926,9 @@ static int add_detailed_info(struct drm_
+ static int add_detailed_info_eedid(struct drm_connector *connector,
+ struct edid *edid, u32 quirks)
+ {
+- struct drm_device *dev = connector->dev;
+- int i, j, modes = 0;
++ int i, modes = 0;
+ char *edid_ext = NULL;
+ struct detailed_timing *timing;
+- struct detailed_non_pixel *data;
+- struct drm_display_mode *newmode;
+ int edid_ext_num;
+ int start_offset, end_offset;
+ int timing_level;
+@@ -976,51 +979,7 @@ static int add_detailed_info_eedid(struc
+ for (i = start_offset; i < end_offset;
+ i += sizeof(struct detailed_timing)) {
+ timing = (struct detailed_timing *)(edid_ext + i);
+- data = &timing->data.other_data;
+- /* Detailed mode timing */
+- if (timing->pixel_clock) {
+- newmode = drm_mode_detailed(dev, edid, timing, quirks);
+- if (!newmode)
+- continue;
+-
+- drm_mode_probed_add(connector, newmode);
+-
+- modes++;
+- continue;
+- }
+-
+- /* Other timing or info */
+- switch (data->type) {
+- case EDID_DETAIL_MONITOR_SERIAL:
+- break;
+- case EDID_DETAIL_MONITOR_STRING:
+- break;
+- case EDID_DETAIL_MONITOR_RANGE:
+- /* Get monitor range data */
+- break;
+- case EDID_DETAIL_MONITOR_NAME:
+- break;
+- case EDID_DETAIL_MONITOR_CPDATA:
+- break;
+- case EDID_DETAIL_STD_MODES:
+- /* Five modes per detailed section */
+- for (j = 0; j < 5; i++) {
+- struct std_timing *std;
+- struct drm_display_mode *newmode;
+-
+- std = &data->data.timings[j];
+- newmode = drm_mode_std(dev, std,
+- edid->revision,
+- timing_level);
+- if (newmode) {
+- drm_mode_probed_add(connector, newmode);
+- modes++;
+- }
+- }
+- break;
+- default:
+- break;
+- }
++ modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+ }
+
+ return modes;
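The embarrassing bug mentioned in the changelog is visible in the code removed above: the standard-timing loops increment i, the outer detailed-timing index, while testing j, so they never terminate. The fragment below is a hypothetical reduction of the fix, not the driver code.

/* Reduction of the old bug: the incremented variable (i) was not the
 * tested one (j), e.g. "for (j = 0; j < 6; i++)", which never ends.
 * The fixed loop simply advances j over the six sub-block entries. */
#include <stdio.h>

int main(void)
{
	int j, modes = 0;

	for (j = 0; j < 6; j++)         /* fixed form used by add_detailed_modes() */
		modes++;

	printf("parsed %d standard-timing entries\n", modes);
	return 0;
}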
+From 29874f44fbcbc24b231b42c9956f8f9de9407231 Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shaohua.li@intel.com>
+Date: Wed, 18 Nov 2009 15:15:02 +0800
+Subject: drm/i915: fix gpio register detection logic for BIOS without VBT
+
+From: Shaohua Li <shaohua.li@intel.com>
+
+commit 29874f44fbcbc24b231b42c9956f8f9de9407231 upstream.
+
+If no VBT is present, crt_ddc_bus will be left at 0, causing us
+to use that for the GPIO register offset. That's never a valid register
+offset, so let the "undefined" value be 0 instead of -1.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+[anholt: clarified the commit message a bit]
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 2 +-
+ drivers/gpu/drm/i915/intel_bios.c | 4 ----
+ drivers/gpu/drm/i915/intel_crt.c | 2 +-
+ 3 files changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -258,7 +258,7 @@ typedef struct drm_i915_private {
+
+ struct notifier_block lid_notifier;
+
+- int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
++ int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -241,10 +241,6 @@ parse_general_definitions(struct drm_i91
+ GPIOF,
+ };
+
+- /* Set sensible defaults in case we can't find the general block
+- or it is the wrong chipset */
+- dev_priv->crt_ddc_bus = -1;
+-
+ general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (general) {
+ u16 block_size = get_blocksize(general);
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *d
+ else {
+ i2c_reg = GPIOA;
+ /* Use VBT information for CRT DDC if available */
+- if (dev_priv->crt_ddc_bus != -1)
++ if (dev_priv->crt_ddc_bus != 0)
+ i2c_reg = dev_priv->crt_ddc_bus;
+ }
+ intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+From 290e55056ec3d25c72088628245d8cae037b30db Mon Sep 17 00:00:00 2001
+From: Maarten Maathuis <madman2003@gmail.com>
+Date: Sat, 20 Feb 2010 03:22:21 +0100
+Subject: drm/ttm: handle OOM in ttm_tt_swapout
+
+From: Maarten Maathuis <madman2003@gmail.com>
+
+commit 290e55056ec3d25c72088628245d8cae037b30db upstream.
+
+- Without this change I get a general protection fault.
+- Also use PTR_ERR where applicable.
+
+Signed-off-by: Maarten Maathuis <madman2003@gmail.com>
+Reviewed-by: Dave Airlie <airlied@redhat.com>
+Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/ttm/ttm_tt.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_tt.c
++++ b/drivers/gpu/drm/ttm/ttm_tt.c
+@@ -466,7 +466,7 @@ static int ttm_tt_swapin(struct ttm_tt *
+ void *from_virtual;
+ void *to_virtual;
+ int i;
+- int ret;
++ int ret = -ENOMEM;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
+@@ -485,8 +485,10 @@ static int ttm_tt_swapin(struct ttm_tt *
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ from_page = read_mapping_page(swap_space, i, NULL);
+- if (IS_ERR(from_page))
++ if (IS_ERR(from_page)) {
++ ret = PTR_ERR(from_page);
+ goto out_err;
++ }
+ to_page = __ttm_tt_get_page(ttm, i);
+ if (unlikely(to_page == NULL))
+ goto out_err;
+@@ -509,7 +511,7 @@ static int ttm_tt_swapin(struct ttm_tt *
+ return 0;
+ out_err:
+ ttm_tt_free_alloced_pages(ttm);
+- return -ENOMEM;
++ return ret;
+ }
+
+ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
+@@ -521,6 +523,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, s
+ void *from_virtual;
+ void *to_virtual;
+ int i;
++ int ret = -ENOMEM;
+
+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
+ BUG_ON(ttm->caching_state != tt_cached);
+@@ -543,7 +546,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, s
+ 0);
+ if (unlikely(IS_ERR(swap_storage))) {
+ printk(KERN_ERR "Failed allocating swap storage.\n");
+- return -ENOMEM;
++ return PTR_ERR(swap_storage);
+ }
+ } else
+ swap_storage = persistant_swap_storage;
+@@ -555,9 +558,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, s
+ if (unlikely(from_page == NULL))
+ continue;
+ to_page = read_mapping_page(swap_space, i, NULL);
+- if (unlikely(to_page == NULL))
++ if (unlikely(IS_ERR(to_page))) {
++ ret = PTR_ERR(to_page);
+ goto out_err;
+-
++ }
+ preempt_disable();
+ from_virtual = kmap_atomic(from_page, KM_USER0);
+ to_virtual = kmap_atomic(to_page, KM_USER1);
+@@ -581,5 +585,5 @@ out_err:
+ if (!persistant_swap_storage)
+ fput(swap_storage);
+
+- return -ENOMEM;
++ return ret;
+ }
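The error handling being tightened here follows the kernel's ERR_PTR convention: a pointer-returning function encodes an errno in the pointer itself, callers test it with IS_ERR() and recover the code with PTR_ERR() rather than assuming every failure is -ENOMEM. The sketch below mocks those macros in userspace to show the shape of the idiom; it is illustrative only and the mock definitions are simplified.

/* Userspace mock of ERR_PTR/IS_ERR/PTR_ERR, showing why ttm_tt_swapout()
 * now propagates PTR_ERR(to_page) instead of hard-coding -ENOMEM.
 * Simplified, illustrative definitions; not the kernel headers. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))

static void *read_mapping_page_mock(int fail)
{
	static int page;                        /* stand-in for a real page */

	return fail ? ERR_PTR(-EIO) : &page;    /* failure isn't always -ENOMEM */
}

int main(void)
{
	void *page = read_mapping_page_mock(1);

	if (IS_ERR(page)) {
		printf("propagating errno %ld to the caller\n", PTR_ERR(page));
		return 1;
	}
	return 0;
}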
+From 70136081fc67ea77d849f86fa323e5773c8e40ea Mon Sep 17 00:00:00 2001
+From: Theodore Kilgore <kilgota@auburn.edu>
+Date: Fri, 25 Dec 2009 05:15:10 -0300
+Subject: V4L/DVB (13991): gspca_mr973010a: Fix cif type 1 cameras not streaming on UHCI controllers
+
+From: Theodore Kilgore <kilgota@auburn.edu>
+
+commit 70136081fc67ea77d849f86fa323e5773c8e40ea upstream.
+
+If you read the mail to Oliver Neukum on the linux-usb list, then you know
+that I found a cure for the mysterious problem of the MR97310a CIF "type
+1" cameras freezing up and refusing to stream when hooked up to a
+machine with a UHCI controller.
+
+Namely, the cure is that if the camera is an mr97310a CIF type 1 camera, you
+have to send it 0xa0, 0x00. Somehow, this is a timing reset command, or
+such. It un-blocks whatever was previously stopping the CIF type 1 cameras
+from working on the UHCI-based machines.
+
+Signed-off-by: Theodore Kilgore <kilgota@auburn.edu>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/gspca/mr97310a.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/media/video/gspca/mr97310a.c
++++ b/drivers/media/video/gspca/mr97310a.c
+@@ -530,6 +530,12 @@ static int start_cif_cam(struct gspca_de
+ {0x13, 0x00, {0x01}, 1},
+ {0, 0, {0}, 0}
+ };
++ /* Without this command the cam won't work with USB-UHCI */
++ gspca_dev->usb_buf[0] = 0x0a;
++ gspca_dev->usb_buf[1] = 0x00;
++ err_code = mr_write(gspca_dev, 2);
++ if (err_code < 0)
++ return err_code;
+ err_code = sensor_write_regs(gspca_dev, cif_sensor1_init_data,
+ ARRAY_SIZE(cif_sensor1_init_data));
+ }
+From 8fcc501831aa5b37a4a5a8cd9dc965be3cacc599 Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Mon, 28 Dec 2009 13:15:20 +0800
+Subject: drm/i915: disable TV hotplug status check
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit 8fcc501831aa5b37a4a5a8cd9dc965be3cacc599 upstream.
+
+As we removed TV hotplug, don't check its status ever.
+
+Reviewed-by: Adam Jackson <ajax@redhat.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_tv.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1801,8 +1801,6 @@ intel_tv_init(struct drm_device *dev)
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_bottom_margin_property,
+ tv_priv->margin[TV_MARGIN_BOTTOM]);
+-
+- dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
+ out:
+ drm_sysfs_connector_add(connector);
+ }
+From 43bcd61fae05fc6062b4f117c5adb1a72c9f8c57 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 3 Nov 2009 09:03:34 +0000
+Subject: drm/i915: fix get_core_clock_speed for G33 class desktop chips
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 43bcd61fae05fc6062b4f117c5adb1a72c9f8c57 upstream.
+
+Somehow the case for G33 got dropped while porting from ums code.
+This made a 400MHz chip into a 133MHz one which resulted in the
+unnecessary enabling of double wide pipe mode which in turn
+screwed up the overlay code.
+
+Nothing else (than the overlay code) seems to be affected.
+
+This fixes fdo.org bug #24835
+
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4322,7 +4322,7 @@ static void intel_init_display(struct dr
+ }
+
+ /* Returns the core display clock speed */
+- if (IS_I945G(dev))
++ if (IS_I945G(dev) || (IS_G33(dev) && ! IS_IGDGM(dev)))
+ dev_priv->display.get_display_clock_speed =
+ i945_get_display_clock_speed;
+ else if (IS_I915G(dev))
+From ceb0297d3da7ecf44bccec2c4520c8710612c238 Mon Sep 17 00:00:00 2001
+From: Jerome Glisse <jglisse@redhat.com>
+Date: Sun, 14 Feb 2010 19:33:18 +0000
+Subject: drm/radeon: r6xx/r7xx possible security issue, system ram access
+
+From: Jerome Glisse <jglisse@redhat.com>
+
+commit c8c15ff1e90bfc4a2db1ba77a01b3b2783e723fc upstream
+
+This patch workaround a possible security issue which can allow
+user to abuse drm on r6xx/r7xx hw to access any system ram memory.
+This patch doesn't break userspace, it detect "valid" old use of
+CB_COLOR[0-7]_FRAG & CB_COLOR[0-7]_TILE registers and overwritte
+the address these registers are pointing to with the one of the
+last color buffer. This workaround will work for old mesa &
+xf86-video-ati and any old user which did use similar register
+programming pattern as those (we expect that there is no others
+user of those ioctl except possibly a malicious one). This patch
+add a warning if it detects such usage, warning encourage people
+to update their mesa & xf86-video-ati. New userspace will submit
+proper relocation.
+
+Fix for xf86-video-ati / mesa (this kernel patch is enough to
+prevent abuse, fix for userspace are to set proper cs stream and
+avoid kernel warning) :
+http://cgit.freedesktop.org/xorg/driver/xf86-video-ati/commit/?id=95d63e408cc88b6934bec84a0b1ef94dfe8bee7b
+http://cgit.freedesktop.org/mesa/mesa/commit/?id=46dc6fd3ed5ef96cda53641a97bc68c3bc104a9f
+
+Abusing this register to access system ram is not easy; here is an
+outline of how it could be achieved. First the attacker must have
+access to the drm device and be able to submit a command stream
+through the cs ioctl. Then the attacker must build a proper command
+stream for r6xx/r7xx hw which abuses the FRAG or TILE buffer to
+overwrite the GPU GART, which is in VRAM. To achieve this the
+attacker has to set up CB_COLOR[0-7]_FRAG or CB_COLOR[0-7]_TILE to
+point to the GPU GART, then find a way to write predictable values
+into those buffers (with a little cleverness I believe this can be
+done, but it is a hard task). Once the attacker has such a program
+it can overwrite the GPU GART so that it points anywhere in system
+memory. It can then reuse the same method it used to reprogram the
+GART to overwrite system ram through the GART mapping. In the
+process the attacker has to be careful not to overwrite any
+sensitive area of the GART table, like ring or IB gart entries, as
+that will more than likely lead to a GPU lockup. Bottom line: I
+think it's very hard to use this flaw to get system ram access, but
+in theory one can.
+
+Side note: I am not aware of anyone ever using the GPU as an
+attack vector; nevertheless we take great care in the opensource
+driver to try to detect and forbid malicious use of the GPU. I don't
+think the closed source drivers are as cautious as we are.
+
+[bwh: Adjusted context for 2.6.32]
+Signed-off-by: Jerome Glisse <jglisse@redhat.com>
+Signed-off-by: Dave Airlie <airlied@linux.ie>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/r600_cs.c | 83 +++++++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/radeon/r600d.h | 26 +++++++++++
+ drivers/gpu/drm/radeon/radeon.h | 1
+ drivers/gpu/drm/radeon/radeon_cs.c | 1
+ 4 files changed, 111 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nom
+ typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
+ static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+
++struct r600_cs_track {
++ u32 cb_color0_base_last;
++};
++
+ /**
+ * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser: parser structure holding parsing context.
+@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nom
+ }
+
+ /**
++ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
++ * @parser: parser structure holding parsing context.
++ *
++ * Check next packet is relocation packet3, do bo validation and compute
++ * GPU offset using the provided start.
++ **/
++static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
++{
++ struct radeon_cs_packet p3reloc;
++ int r;
++
++ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
++ if (r) {
++ return 0;
++ }
++ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
++ return 0;
++ }
++ return 1;
++}
++
++/**
+ * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser: parser structure holding parsing context.
+ *
+@@ -337,6 +363,7 @@ static int r600_packet3_check(struct rad
+ struct radeon_cs_packet *pkt)
+ {
+ struct radeon_cs_reloc *reloc;
++ struct r600_cs_track *track;
+ volatile u32 *ib;
+ unsigned idx;
+ unsigned i;
+@@ -344,6 +371,7 @@ static int r600_packet3_check(struct rad
+ int r;
+ u32 idx_value;
+
++ track = (struct r600_cs_track *)p->track;
+ ib = p->ib->ptr;
+ idx = pkt->idx + 1;
+ idx_value = radeon_get_ib_value(p, idx);
+@@ -503,9 +531,60 @@ static int r600_packet3_check(struct rad
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ switch (reg) {
++ /* This register were added late, there is userspace
++ * which does provide relocation for those but set
++ * 0 offset. In order to avoid breaking old userspace
++ * we detect this and set address to point to last
++ * CB_COLOR0_BASE, note that if userspace doesn't set
++ * CB_COLOR0_BASE before this register we will report
++ * error. Old userspace always set CB_COLOR0_BASE
++ * before any of this.
++ */
++ case R_0280E0_CB_COLOR0_FRAG:
++ case R_0280E4_CB_COLOR1_FRAG:
++ case R_0280E8_CB_COLOR2_FRAG:
++ case R_0280EC_CB_COLOR3_FRAG:
++ case R_0280F0_CB_COLOR4_FRAG:
++ case R_0280F4_CB_COLOR5_FRAG:
++ case R_0280F8_CB_COLOR6_FRAG:
++ case R_0280FC_CB_COLOR7_FRAG:
++ case R_0280C0_CB_COLOR0_TILE:
++ case R_0280C4_CB_COLOR1_TILE:
++ case R_0280C8_CB_COLOR2_TILE:
++ case R_0280CC_CB_COLOR3_TILE:
++ case R_0280D0_CB_COLOR4_TILE:
++ case R_0280D4_CB_COLOR5_TILE:
++ case R_0280D8_CB_COLOR6_TILE:
++ case R_0280DC_CB_COLOR7_TILE:
++ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
++ if (!track->cb_color0_base_last) {
++ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
++ return -EINVAL;
++ }
++ ib[idx+1+i] = track->cb_color0_base_last;
++ printk_once(KERN_WARNING "You have old & broken userspace "
++ "please consider updating mesa & xf86-video-ati\n");
++ } else {
++ r = r600_cs_packet_next_reloc(p, &reloc);
++ if (r) {
++ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
++ return -EINVAL;
++ }
++ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++ }
++ break;
+ case DB_DEPTH_BASE:
+ case DB_HTILE_DATA_BASE:
+ case CB_COLOR0_BASE:
++ r = r600_cs_packet_next_reloc(p, &reloc);
++ if (r) {
++ DRM_ERROR("bad SET_CONTEXT_REG "
++ "0x%04X\n", reg);
++ return -EINVAL;
++ }
++ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++ track->cb_color0_base_last = ib[idx+1+i];
++ break;
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+@@ -678,8 +757,11 @@ static int r600_packet3_check(struct rad
+ int r600_cs_parse(struct radeon_cs_parser *p)
+ {
+ struct radeon_cs_packet pkt;
++ struct r600_cs_track *track;
+ int r;
+
++ track = kzalloc(sizeof(*track), GFP_KERNEL);
++ p->track = track;
+ do {
+ r = r600_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *de
+ /* initialize parser */
+ memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ parser.filp = filp;
++ parser.dev = &dev->pdev->dev;
+ parser.rdev = NULL;
+ parser.family = family;
+ parser.ib = &fake_ib;
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -674,4 +674,30 @@
+ #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
+ #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
+
++#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
++
++#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
++#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
++#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
++#define C_0280E0_BASE_256B 0x00000000
++#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
++#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
++#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
++#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
++#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
++#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
++#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
++#define R_0280C0_CB_COLOR0_TILE 0x0280C0
++#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
++#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
++#define C_0280C0_BASE_256B 0x00000000
++#define R_0280C4_CB_COLOR1_TILE 0x0280C4
++#define R_0280C8_CB_COLOR2_TILE 0x0280C8
++#define R_0280CC_CB_COLOR3_TILE 0x0280CC
++#define R_0280D0_CB_COLOR4_TILE 0x0280D0
++#define R_0280D4_CB_COLOR5_TILE 0x0280D4
++#define R_0280D8_CB_COLOR6_TILE 0x0280D8
++#define R_0280DC_CB_COLOR7_TILE 0x0280DC
++
++
+ #endif
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -448,6 +448,7 @@ struct radeon_cs_chunk {
+ };
+
+ struct radeon_cs_parser {
++ struct device *dev;
+ struct radeon_device *rdev;
+ struct drm_file *filp;
+ /* chunks */
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -230,6 +230,7 @@ int radeon_cs_ioctl(struct drm_device *d
+ memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ parser.filp = filp;
+ parser.rdev = rdev;
++ parser.dev = rdev->dev;
+ r = radeon_cs_parser_init(&parser, data);
+ if (r) {
+ DRM_ERROR("Failed to initialize parser !\n");
+From e3dae5087754984ed7e6daf4fbb742ff026aadd5 Mon Sep 17 00:00:00 2001
+From: Jerome Glisse <jglisse@redhat.com>
+Date: Sun, 14 Feb 2010 19:31:58 +0000
+Subject: drm/radeon/kms: r600/r700 don't test ib if ib initialization fails
+
+From: Jerome Glisse <jglisse@redhat.com>
+
+commit db96380ea26fcc31ab37189aedeabd12894b1431 upstream
+
+If IB initialization fails, don't try to test the IB as it will result
+in an oops (accessing a NULL IB buffer ptr).
+
+[bwh: Adjusted context for 2.6.32]
+Signed-off-by: Jerome Glisse <jglisse@redhat.com>
+Signed-off-by: Dave Airlie <airlied@linux.ie>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/r600.c | 13 +++++++------
+ drivers/gpu/drm/radeon/rv770.c | 13 +++++++------
+ 2 files changed, 14 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -1686,13 +1686,14 @@ int r600_init(struct radeon_device *rdev
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+- rdev->accel_working = false;
+- }
+- r = r600_ib_test(rdev);
+- if (r) {
+- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
++ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
++ } else {
++ r = r600_ib_test(rdev);
++ if (r) {
++ dev_err(rdev->dev, "IB test failed (%d).\n", r);
++ rdev->accel_working = false;
++ }
+ }
+ }
+ return 0;
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1034,13 +1034,14 @@ int rv770_init(struct radeon_device *rde
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+- rdev->accel_working = false;
+- }
+- r = r600_ib_test(rdev);
+- if (r) {
+- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
++ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
++ } else {
++ r = r600_ib_test(rdev);
++ if (r) {
++ dev_err(rdev->dev, "IB test failed (%d).\n", r);
++ rdev->accel_working = false;
++ }
+ }
+ }
+ return 0;
+From 7e71c9e2e7704ebf044d4a964e02fbd2098a173f Mon Sep 17 00:00:00 2001
+From: Jerome Glisse <jglisse@redhat.com>
+Date: Sun, 17 Jan 2010 21:21:41 +0100
+Subject: drm/radeon/kms: Forbid creation of framebuffer with no valid GEM object
+
+From: Jerome Glisse <jglisse@redhat.com>
+
+commit 7e71c9e2e7704ebf044d4a964e02fbd2098a173f upstream.
+
+This avoids an oops if the fb is used at a later point. Trying to create
+a framebuffer with no valid GEM object is bogus and should be forbidden,
+which is what this patch does.
+
+Signed-off-by: Jerome Glisse <jglisse@redhat.com>
+Signed-off-by: Dave Airlie <airlied@linux.ie>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/radeon_display.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -599,7 +599,11 @@ radeon_user_framebuffer_create(struct dr
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+-
++ if (obj == NULL) {
++ dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
++ "can't create framebuffer\n", mode_cmd->handle);
++ return NULL;
++ }
+ return radeon_framebuffer_create(dev, mode_cmd, obj);
+ }
+
+From 1379d2fef0ec07c7027a5e89036025ce761470c8 Mon Sep 17 00:00:00 2001
+From: Zhang Rui <rui.zhang@intel.com>
+Date: Tue, 16 Feb 2010 04:16:55 -0500
+Subject: ACPI, i915: blacklist Clevo M5x0N bad_lid state
+
+From: Zhang Rui <rui.zhang@intel.com>
+
+commit 1379d2fef0ec07c7027a5e89036025ce761470c8 upstream.
+
+Wrong Lid state reported.
+Need to blacklist this machine for LVDS detection.
+
+Signed-off-by: Zhang Rui <rui.zhang@intel.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_lvds.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -629,6 +629,13 @@ static const struct dmi_system_id bad_li
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+ },
+ },
++ {
++ .ident = "Clevo M5x0N",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++ DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
++ },
++ },
+ { }
+ };
+
+From 01d4503968f471f876fb44335800d2cf8dc5a2ce Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Sun, 31 Jan 2010 07:07:14 +1000
+Subject: drm/radeon/kms: use udelay for short delays
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 01d4503968f471f876fb44335800d2cf8dc5a2ce upstream.
+
+For usec delays use udelay instead of scheduling; this should
+allow reclocking to happen faster. The scheduling was also the
+cause of reported 33s delays at bootup on certain systems.
+
+fixes: freedesktop.org bug 25506
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/atom.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -607,7 +607,7 @@ static void atom_op_delay(atom_exec_cont
+ uint8_t count = U8((*ptr)++);
+ SDEBUG(" count: %d\n", count);
+ if (arg == ATOM_UNIT_MICROSEC)
+- schedule_timeout_uninterruptible(usecs_to_jiffies(count));
++ udelay(count);
+ else
+ schedule_timeout_uninterruptible(msecs_to_jiffies(count));
+ }
+From b9241ea31fae4887104e5d1b3b18f4009c25a0c4 Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Wed, 25 Nov 2009 13:09:39 +0800
+Subject: drm/i915: Don't wait interruptible for possible plane buffer flush
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit b9241ea31fae4887104e5d1b3b18f4009c25a0c4 upstream.
+
+When we set up a buffer for the display plane, we check for any
+pending required GPU flush and possibly make an interruptible wait
+for the flush to complete. But that wait is very likely to fail if
+the X process receives a signal, which then fails the modeset
+process and leaves the display engine in an inconsistent state.
+The result can be a blank screen or a CPU hang, and the DDX driver
+always turns on output DPMS whether or not the modeset failed.
+
+So this creates a new helper for setting up the display plane
+buffer which, when a flush is needed, waits uninterruptibly.
+
+This one should fix bug like https://bugs.freedesktop.org/show_bug.cgi?id=24009.
+Also fixing mode switch stress test on Ironlake.
+
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 1
+ drivers/gpu/drm/i915/i915_gem.c | 51 +++++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/intel_display.c | 2 -
+ 3 files changed, 53 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -829,6 +829,7 @@ int i915_lp_ring_sync(struct drm_device
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
++int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+ int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_gem_object *obj, int id);
+ void i915_gem_detach_phys_object(struct drm_device *dev,
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2825,6 +2825,57 @@ i915_gem_object_set_to_gtt_domain(struct
+ return 0;
+ }
+
++/*
++ * Prepare buffer for display plane. Use uninterruptible for possible flush
++ * wait, as in modesetting process we're not supposed to be interrupted.
++ */
++int
++i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ uint32_t old_write_domain, old_read_domains;
++ int ret;
++
++ /* Not valid to be called on unbound objects. */
++ if (obj_priv->gtt_space == NULL)
++ return -EINVAL;
++
++ i915_gem_object_flush_gpu_write_domain(obj);
++
++ /* Wait on any GPU rendering and flushing to occur. */
++ if (obj_priv->active) {
++#if WATCH_BUF
++ DRM_INFO("%s: object %p wait for seqno %08x\n",
++ __func__, obj, obj_priv->last_rendering_seqno);
++#endif
++ ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
++ if (ret != 0)
++ return ret;
++ }
++
++ old_write_domain = obj->write_domain;
++ old_read_domains = obj->read_domains;
++
++ obj->read_domains &= I915_GEM_DOMAIN_GTT;
++
++ i915_gem_object_flush_cpu_write_domain(obj);
++
++ /* It should now be out of any other write domains, and we can update
++ * the domain values for our changes.
++ */
++ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
++ obj->read_domains |= I915_GEM_DOMAIN_GTT;
++ obj->write_domain = I915_GEM_DOMAIN_GTT;
++ obj_priv->dirty = 1;
++
++ trace_i915_gem_object_change_domain(obj,
++ old_read_domains,
++ old_write_domain);
++
++ return 0;
++}
++
+ /**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1253,7 +1253,7 @@ intel_pipe_set_base(struct drm_crtc *crt
+ return ret;
+ }
+
+- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
++ ret = i915_gem_object_set_to_display_plane(obj);
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ mutex_unlock(&dev->struct_mutex);
+From 48764bf43f746113fc77877d7e80f2df23ca4cbb Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 15 Sep 2009 22:57:32 +0200
+Subject: drm/i915: add i915_lp_ring_sync helper
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 48764bf43f746113fc77877d7e80f2df23ca4cbb upstream.
+
+This just waits until the hw has passed the current ring position
+in cmd execution. It slightly changes the existing i915_wait_request
+function to make uninterruptible waiting possible - there is no
+point in returning to userspace while mucking around with the
+overlay; that piece of hw is just too fragile.
+
+Also replace a magic 0 with the symbolic constant (and kill the then
+superfluous comment) while I was looking at the code.
+
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 1
+ drivers/gpu/drm/i915/i915_gem.c | 49 +++++++++++++++++++++++++++++++---------
+ include/drm/drm_os_linux.h | 2 -
+ 3 files changed, 41 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -825,6 +825,7 @@ void i915_gem_cleanup_ringbuffer(struct
+ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end);
+ int i915_gem_idle(struct drm_device *dev);
++int i915_lp_ring_sync(struct drm_device *dev);
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1809,12 +1809,8 @@ i915_gem_retire_work_handler(struct work
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+-/**
+- * Waits for a sequence number to be signaled, and cleans up the
+- * request and object lists appropriately for that event.
+- */
+ static int
+-i915_wait_request(struct drm_device *dev, uint32_t seqno)
++i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 ier;
+@@ -1841,10 +1837,15 @@ i915_wait_request(struct drm_device *dev
+
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ i915_user_irq_get(dev);
+- ret = wait_event_interruptible(dev_priv->irq_queue,
+- i915_seqno_passed(i915_get_gem_seqno(dev),
+- seqno) ||
+- atomic_read(&dev_priv->mm.wedged));
++ if (interruptible)
++ ret = wait_event_interruptible(dev_priv->irq_queue,
++ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
++ atomic_read(&dev_priv->mm.wedged));
++ else
++ wait_event(dev_priv->irq_queue,
++ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
++ atomic_read(&dev_priv->mm.wedged));
++
+ i915_user_irq_put(dev);
+ dev_priv->mm.waiting_gem_seqno = 0;
+
+@@ -1868,6 +1869,34 @@ i915_wait_request(struct drm_device *dev
+ return ret;
+ }
+
++/**
++ * Waits for a sequence number to be signaled, and cleans up the
++ * request and object lists appropriately for that event.
++ */
++static int
++i915_wait_request(struct drm_device *dev, uint32_t seqno)
++{
++ return i915_do_wait_request(dev, seqno, 1);
++}
++
++/**
++ * Waits for the ring to finish up to the latest request. Usefull for waiting
++ * for flip events, e.g for the overlay support. */
++int i915_lp_ring_sync(struct drm_device *dev)
++{
++ uint32_t seqno;
++ int ret;
++
++ seqno = i915_add_request(dev, NULL, 0);
++
++ if (seqno == 0)
++ return -ENOMEM;
++
++ ret = i915_do_wait_request(dev, seqno, 0);
++ BUG_ON(ret == -ERESTARTSYS);
++ return ret;
++}
++
+ static void
+ i915_gem_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+@@ -1936,7 +1965,7 @@ i915_gem_flush(struct drm_device *dev,
+ #endif
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+- OUT_RING(0); /* noop */
++ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
+ }
+--- a/include/drm/drm_os_linux.h
++++ b/include/drm/drm_os_linux.h
+@@ -123,5 +123,5 @@ do { \
+ remove_wait_queue(&(queue), &entry); \
+ } while (0)
+
+-#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
++#define DRM_WAKEUP( queue ) wake_up( queue )
+ #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
+From 823f68fd646da6a39a9c0d3eb4c60d69dab5aa13 Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Mon, 28 Dec 2009 13:23:36 +0800
+Subject: drm/i915: remove full registers dump debug
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit 823f68fd646da6a39a9c0d3eb4c60d69dab5aa13 upstream.
+
+This one reverts 9e3a6d155ed0a7636b926a798dd7221ea107b274.
+As reported by http://bugzilla.kernel.org/show_bug.cgi?id=14485,
+this dump causes a hang on some machines. If something
+really needs this kind of full register dump, it can be done
+within intel-gpu-tools.
+
+Cc: Ben Gamari <bgamari.foss@gmail.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ drivers/gpu/drm/i915/i915_debugfs.c | 30 ------------------------------
+ 1 file changed, 30 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -384,37 +384,7 @@ out:
+ return 0;
+ }
+
+-static int i915_registers_info(struct seq_file *m, void *data) {
+- struct drm_info_node *node = (struct drm_info_node *) m->private;
+- struct drm_device *dev = node->minor->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- uint32_t reg;
+-
+-#define DUMP_RANGE(start, end) \
+- for (reg=start; reg < end; reg += 4) \
+- seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
+-
+- DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
+- DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
+- DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
+- DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
+- DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
+- DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
+- DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
+- DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
+- DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
+- DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
+- DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
+- DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
+- DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
+- DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
+-
+- return 0;
+-}
+-
+-
+ static struct drm_info_list i915_debugfs_list[] = {
+- {"i915_regs", i915_registers_info, 0},
+ {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+ {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+ {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+From 99fcb766a3a50466fe31d743260a3400c1aee855 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Sun, 7 Feb 2010 16:20:18 +0100
+Subject: drm/i915: Update write_domains on active list after flush.
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 99fcb766a3a50466fe31d743260a3400c1aee855 upstream.
+
+Before changing the status of a buffer with a pending write we will await
+a new flush for that buffer. So we can take advantage of any flushes
+posted whilst the buffer is active and pending processing by the GPU, by
+clearing its write_domain and updating its last_rendering_seqno -- thus
+saving a potential flush in deep queues and improving flushing behaviour
+upon eviction for both GTT space and fences.
+
+In order to reduce the time spent searching the active list for matching
+write_domains, we move those to a separate list whose elements are
+the buffers belonging to the active/flushing list with pending writes.
+
+Original patch by Chris Wilson <chris@chris-wilson.co.uk>, forward-ported
+by me.
+
+In addition to better performance, this also fixes a real bug. Before
+this change, i915_gem_evict_everything didn't work as advertised. When
+the gpu was actually busy and processing requests, the flush and subsequent
+wait would not move active and dirty buffers to the inactive list, but
+just to the flushing list, which triggered the BUG_ON at the end of this
+function. With the tighter dirty buffer tracking, all currently busy and
+dirty buffers get moved to the inactive list by one i915_gem_flush operation.
+
+I've left in the BUG_ON I used to prove this.
+
+References:
+ Bug 25911 - 2.10.0 causes kernel oops and system hangs
+ http://bugs.freedesktop.org/show_bug.cgi?id=25911
+
+ Bug 26101 - [i915] xf86-video-intel 2.10.0 (and git) triggers kernel oops
+ within seconds after login
+ http://bugs.freedesktop.org/show_bug.cgi?id=26101
+
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Tested-by: Adam Lantos <hege@playma.org>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 11 +++++++++++
+ drivers/gpu/drm/i915/i915_gem.c | 23 +++++++++++++++++++----
+ 2 files changed, 30 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -467,6 +467,15 @@ typedef struct drm_i915_private {
+ struct list_head flushing_list;
+
+ /**
++ * List of objects currently pending a GPU write flush.
++ *
++ * All elements on this list will belong to either the
++ * active_list or flushing_list, last_rendering_seqno can
++ * be used to differentiate between the two elements.
++ */
++ struct list_head gpu_write_list;
++
++ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+@@ -558,6 +567,8 @@ struct drm_i915_gem_object {
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head list;
++ /** This object's place on GPU write list */
++ struct list_head gpu_write_list;
+
+ /** This object's place on the fenced object LRU */
+ struct list_head fence_list;
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct
+ else
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
++ BUG_ON(!list_empty(&obj_priv->gpu_write_list));
++
+ obj_priv->last_rendering_seqno = 0;
+ if (obj_priv->active) {
+ obj_priv->active = 0;
+@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev,
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+- &dev_priv->mm.flushing_list, list) {
++ &dev_priv->mm.gpu_write_list,
++ gpu_write_list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev,
+ uint32_t old_write_domain = obj->write_domain;
+
+ obj->write_domain = 0;
++ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+@@ -2073,8 +2077,8 @@ static int
+ i915_gem_evict_everything(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- uint32_t seqno;
+ int ret;
++ uint32_t seqno;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+@@ -2096,6 +2100,8 @@ i915_gem_evict_everything(struct drm_dev
+ if (ret)
+ return ret;
+
++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret)
+ return ret;
+@@ -2690,7 +2696,7 @@ i915_gem_object_flush_gpu_write_domain(s
+ old_write_domain = obj->write_domain;
+ i915_gem_flush(dev, 0, obj->write_domain);
+ seqno = i915_add_request(dev, NULL, obj->write_domain);
+- obj->write_domain = 0;
++ BUG_ON(obj->write_domain);
+ i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+@@ -3710,16 +3716,23 @@ i915_gem_execbuffer(struct drm_device *d
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+- if (dev->flush_domains)
++ if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains);
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain = obj->write_domain;
+
+ obj->write_domain = obj->pending_write_domain;
++ if (obj->write_domain)
++ list_move_tail(&obj_priv->gpu_write_list,
++ &dev_priv->mm.gpu_write_list);
++ else
++ list_del_init(&obj_priv->gpu_write_list);
++
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
+@@ -4112,6 +4125,7 @@ int i915_gem_init_object(struct drm_gem_
+ obj_priv->obj = obj;
+ obj_priv->fence_reg = I915_FENCE_REG_NONE;
+ INIT_LIST_HEAD(&obj_priv->list);
++ INIT_LIST_HEAD(&obj_priv->gpu_write_list);
+ INIT_LIST_HEAD(&obj_priv->fence_list);
+ obj_priv->madv = I915_MADV_WILLNEED;
+
+@@ -4563,6 +4577,7 @@ i915_gem_load(struct drm_device *dev)
+ spin_lock_init(&dev_priv->mm.active_list_lock);
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
++ INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+From fd2e8ea597222b8f38ae8948776a61ea7958232e Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Tue, 9 Feb 2010 14:14:36 +0000
+Subject: drm/i915: Increase fb alignment to 64k
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit fd2e8ea597222b8f38ae8948776a61ea7958232e upstream.
+
+An untiled framebuffer must be aligned to 64k. This is normally handled
+by intel_pin_and_fence_fb_obj(), but intelfb_create() likes to be
+different and do the pinning itself. However, it aligns the buffer
+object incorrectly for pre-i965 chipsets, causing a PGTBL_ERR when it
+is installed onto the output.
+
+Fixes:
+ KMS error message while initializing modesetting -
+ render error detected: EIR: 0x10 [i915]
+ http://bugs.freedesktop.org/show_bug.cgi?id=22936
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_fb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_dev
+
+ mutex_lock(&dev->struct_mutex);
+
+- ret = i915_gem_object_pin(fbo, PAGE_SIZE);
++ ret = i915_gem_object_pin(fbo, 64*1024);
+ if (ret) {
+ DRM_ERROR("failed to pin fb: %d\n", ret);
+ goto out_unref;
+From ee25df2bc379728c45d81e04cf87984db1425edf Mon Sep 17 00:00:00 2001
+From: Jesse Barnes <jbarnes@virtuousgeek.org>
+Date: Sat, 6 Feb 2010 10:41:53 -0800
+Subject: drm/i915: handle FBC and self-refresh better
+
+From: Jesse Barnes <jbarnes@virtuousgeek.org>
+
+commit ee25df2bc379728c45d81e04cf87984db1425edf upstream.
+
+On 945, we need to avoid entering self-refresh if the compressor is
+busy, or we may cause display FIFO underruns leading to ugly flicker.
+
+Fixes fdo bug #24314, kernel bug #15043.
+
+Tested-by: Alexander Lam <lambchop468@gmail.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Tested-by: Julien Cristau <jcristau@debian.org> (fd.o #25371)
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_reg.h | 1 +
+ drivers/gpu/drm/i915/intel_display.c | 2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -329,6 +329,7 @@
+ #define FBC_CTL_PERIODIC (1<<30)
+ #define FBC_CTL_INTERVAL_SHIFT (16)
+ #define FBC_CTL_UNCOMPRESSIBLE (1<<14)
++#define FBC_C3_IDLE (1<<13)
+ #define FBC_CTL_STRIDE_SHIFT (5)
+ #define FBC_CTL_FENCENO (1<<0)
+ #define FBC_COMMAND 0x0320c
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -988,6 +988,8 @@ static void i8xx_enable_fbc(struct drm_c
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
++ if (IS_I945GM(dev))
++ fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+From a3cb5195f6db58dbebd8a31b877ddce082c9b63d Mon Sep 17 00:00:00 2001
+From: Zhao Yakui <yakui.zhao@intel.com>
+Date: Fri, 11 Dec 2009 09:26:10 +0800
+Subject: drm/i915: Add MALATA PC-81005 to ACPI LID quirk list
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+commit a3cb5195f6db58dbebd8a31b877ddce082c9b63d upstream.
+
+The MALATA PC-81005 laptop always reports that the LID status is closed and we
+can't use it reliably for LVDS detection. So add this box to the quirk list.
+
+https://bugs.freedesktop.org/show_bug.cgi?id=25523
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Review-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Tested-by: Hector <hector1987@gmail.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_lvds.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -622,6 +622,13 @@ static const struct dmi_system_id bad_li
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
+ },
+ },
++ {
++ .ident = "PC-81005",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
++ },
++ },
+ { }
+ };
+
+From f034b12dbb5749b11e9390e15e93ffa87ece8038 Mon Sep 17 00:00:00 2001
+From: Zhao Yakui <yakui.zhao@intel.com>
+Date: Thu, 21 Jan 2010 15:20:18 +0800
+Subject: drm/i915: Fix the incorrect DMI string for Samsung SX20S laptop
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+commit f034b12dbb5749b11e9390e15e93ffa87ece8038 upstream.
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Reported-by: Philipp Kohlbecher <xt28@gmx.de>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_lvds.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_li
+ {
+ .ident = "Samsung SX20S",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
+ DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
+ },
+ },
+From 40f33a92100f4d9b6e85ad642100cfe42d7ff57d Mon Sep 17 00:00:00 2001
+From: Zhao Yakui <yakui.zhao@intel.com>
+Date: Wed, 6 Jan 2010 13:30:36 +0800
+Subject: drm/i915: Add HP nx9020/SamsungSX20S to ACPI LID quirk list
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+commit 40f33a92100f4d9b6e85ad642100cfe42d7ff57d upstream.
+
+The HP Compaq nx9020 and Samsung SX20S laptops always report that the LID
+status is closed and we can't use it reliably for LVDS detection. So add
+the two boxes to the quirk list.
+
+http://bugzilla.kernel.org/show_bug.cgi?id=14957
+http://bugzilla.kernel.org/show_bug.cgi?id=14554
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_lvds.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -602,6 +602,20 @@ static void intel_lvds_mode_set(struct d
+ /* Some lid devices report incorrect lid status, assume they're connected */
+ static const struct dmi_system_id bad_lid_status[] = {
+ {
++ .ident = "Compaq nx9020",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_BOARD_NAME, "3084"),
++ },
++ },
++ {
++ .ident = "Samsung SX20S",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
++ DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
++ },
++ },
++ {
+ .ident = "Aspire One",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+From f0217c42c9ab3d772e543f635ce628b9478f70b6 Mon Sep 17 00:00:00 2001
+From: Eric Anholt <eric@anholt.net>
+Date: Tue, 1 Dec 2009 11:56:30 -0800
+Subject: drm/i915: Fix DDC on some systems by clearing BIOS GMBUS setup.
+
+From: Eric Anholt <eric@anholt.net>
+
+commit f0217c42c9ab3d772e543f635ce628b9478f70b6 upstream.
+
+This is a sync of a fix I made in the old UMS code. If the BIOS uses
+the GMBUS and doesn't clear that setup, then our bit-banging I2C can
+fail, leading to monitors not being detected.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Cc: maximilian attems <max@stro.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_reg.h | 14 ++++++++++++++
+ drivers/gpu/drm/i915/i915_suspend.c | 5 ++++-
+ drivers/gpu/drm/i915/intel_drv.h | 2 ++
+ drivers/gpu/drm/i915/intel_i2c.c | 19 +++++++++++++++++++
+ 4 files changed, 39 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -405,6 +405,13 @@
+ # define GPIO_DATA_VAL_IN (1 << 12)
+ # define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
++#define GMBUS0 0x5100
++#define GMBUS1 0x5104
++#define GMBUS2 0x5108
++#define GMBUS3 0x510c
++#define GMBUS4 0x5110
++#define GMBUS5 0x5120
++
+ /*
+ * Clock control & power management
+ */
+@@ -2153,6 +2160,13 @@
+ #define PCH_GPIOE 0xc5020
+ #define PCH_GPIOF 0xc5024
+
++#define PCH_GMBUS0 0xc5100
++#define PCH_GMBUS1 0xc5104
++#define PCH_GMBUS2 0xc5108
++#define PCH_GMBUS3 0xc510c
++#define PCH_GMBUS4 0xc5110
++#define PCH_GMBUS5 0xc5120
++
+ #define PCH_DPLL_A 0xc6014
+ #define PCH_DPLL_B 0xc6018
+
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -27,7 +27,7 @@
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+-#include "i915_drv.h"
++#include "intel_drv.h"
+
+ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+ {
+@@ -846,6 +846,9 @@ int i915_restore_state(struct drm_device
+ for (i = 0; i < 3; i++)
+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
++ /* I2C state */
++ intel_i2c_reset_gmbus(dev);
++
+ return 0;
+ }
+
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -134,6 +134,8 @@ void intel_i2c_destroy(struct i2c_adapte
+ int intel_ddc_get_modes(struct intel_output *intel_output);
+ extern bool intel_ddc_probe(struct intel_output *intel_output);
+ void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
++void intel_i2c_reset_gmbus(struct drm_device *dev);
++
+ extern void intel_crt_init(struct drm_device *dev);
+ extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -118,6 +118,23 @@ static void set_data(void *data, int sta
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+ }
+
++/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
++ * engine, but if the BIOS leaves it enabled, then that can break our use
++ * of the bit-banging I2C interfaces. This is notably the case with the
++ * Mac Mini in EFI mode.
++ */
++void
++intel_i2c_reset_gmbus(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (IS_IGDNG(dev)) {
++ I915_WRITE(PCH_GMBUS0, 0);
++ } else {
++ I915_WRITE(GMBUS0, 0);
++ }
++}
++
+ /**
+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(str
+ if(i2c_bit_add_bus(&chan->adapter))
+ goto out_free;
+
++ intel_i2c_reset_gmbus(dev);
++
+ /* JJJ: raise SCL and SDA? */
+ intel_i2c_quirk_set(dev, true);
+ set_data(chan, 1);
+From 33c5fd121eabbccc9103daf6cda36941eb3c349f Mon Sep 17 00:00:00 2001
+From: David John <davidjon@xenontk.org>
+Date: Wed, 27 Jan 2010 15:19:08 +0530
+Subject: drm/i915: Disable SR when more than one pipe is enabled
+
+From: David John <davidjon@xenontk.org>
+
+commit 33c5fd121eabbccc9103daf6cda36941eb3c349f upstream.
+
+Self Refresh should be disabled on dual plane configs. Otherwise, as
+the SR watermark is not calculated for such configs, switching to
+non-VGA mode causes FIFO underruns and display flicker.
+
+This fixes Korg Bug #14897.
+
+Signed-off-by: David John <davidjon@xenontk.org>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2538,6 +2538,10 @@ static void g4x_update_wm(struct drm_dev
+ sr_entries = roundup(sr_entries / cacheline_size, 1);
+ DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
++ } else {
++ /* Turn off self refresh if both pipes are enabled */
++ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
++ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
+@@ -2581,6 +2585,10 @@ static void i965_update_wm(struct drm_de
+ srwm = 1;
+ srwm &= 0x3f;
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
++ } else {
++ /* Turn off self refresh if both pipes are enabled */
++ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
++ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+@@ -2649,6 +2657,10 @@ static void i9xx_update_wm(struct drm_de
+ if (srwm < 0)
+ srwm = 1;
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
++ } else {
++ /* Turn off self refresh if both pipes are enabled */
++ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
++ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+From 1dc7546d1a73664e5d117715b214bea9cae5951c Mon Sep 17 00:00:00 2001
+From: Jesse Barnes <jbarnes@jbarnes-x200.(none)>
+Date: Mon, 19 Oct 2009 10:08:17 +0900
+Subject: drm/i915: enable self-refresh on 965
+
+From: Jesse Barnes <jbarnes@jbarnes-x200.(none)>
+
+commit 1dc7546d1a73664e5d117715b214bea9cae5951c upstream.
+
+Need to calculate the SR watermark and enable it.
+
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 32 ++++++++++++++++++++++++++++----
+ 1 file changed, 28 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2556,15 +2556,39 @@ static void g4x_update_wm(struct drm_dev
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ }
+
+-static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
+- int unused3, int unused4)
++static void i965_update_wm(struct drm_device *dev, int planea_clock,
++ int planeb_clock, int sr_hdisplay, int pixel_size)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned long line_time_us;
++ int sr_clock, sr_entries, srwm = 1;
+
+- DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
++ /* Calc sr entries for one plane configs */
++ if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
++ /* self-refresh has much higher latency */
++ const static int sr_latency_ns = 12000;
++
++ sr_clock = planea_clock ? planea_clock : planeb_clock;
++ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
++
++ /* Use ns/us then divide to preserve precision */
++ sr_entries = (((sr_latency_ns / line_time_us) + 1) *
++ pixel_size * sr_hdisplay) / 1000;
++ sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
++ DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
++ srwm = I945_FIFO_SIZE - sr_entries;
++ if (srwm < 0)
++ srwm = 1;
++ srwm &= 0x3f;
++ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
++ }
++
++ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
++ srwm);
+
+ /* 965 has limitations... */
+- I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
++ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
++ (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ }
+
+From eceb784cec4dc0fcc2993d9ee4a7c0d111ada80a Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Mon, 25 Jan 2010 10:35:16 +0800
+Subject: drm/i915: disable hotplug detect before Ironlake CRT detect
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit eceb784cec4dc0fcc2993d9ee4a7c0d111ada80a upstream.
+
+This tries to fix a CRT detect loop hang seen on some Ironlake form
+factors by clearing the hotplug detect state before starting CRT
+detection, to make sure the next hotplug detect cycle is consistent.
+
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_crt.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -185,6 +185,9 @@ static bool intel_igdng_crt_detect_hotpl
+ adpa = I915_READ(PCH_ADPA);
+
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
++ /* disable HPD first */
++ I915_WRITE(PCH_ADPA, adpa);
++ (void)I915_READ(PCH_ADPA);
+
+ adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
+ ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/freed-ora/current/F-12/linux-2.6-usb-uvc-autosuspend.diff b/freed-ora/current/F-12/linux-2.6-usb-uvc-autosuspend.diff
new file mode 100644
index 000000000..b7c7f6e0f
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-usb-uvc-autosuspend.diff
@@ -0,0 +1,19 @@
+commit 9d4c919bcfa794c054cc33155c7e3c53ac2c5684
+Author: Matthew Garrett <mjg@redhat.com>
+Date: Sun Jul 19 02:24:49 2009 +0100
+
+ Enable autosuspend on UVC by default
+
+diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
+index 89927b7..8de516b 100644
+--- a/drivers/media/video/uvc/uvc_driver.c
++++ b/drivers/media/video/uvc/uvc_driver.c
+@@ -1647,6 +1647,8 @@ static int uvc_probe(struct usb_interface *intf,
+ "supported.\n", ret);
+ }
+
++ usb_device_autosuspend_enable(udev);
++
+ uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
+ return 0;
+
diff --git a/freed-ora/current/F-12/linux-2.6-usb-wwan-update.patch b/freed-ora/current/F-12/linux-2.6-usb-wwan-update.patch
new file mode 100644
index 000000000..d6ab3d38d
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-usb-wwan-update.patch
@@ -0,0 +1,1634 @@
+diff -up linux-2.6.32.noarch/drivers/usb/serial/Kconfig.orig linux-2.6.32.noarch/drivers/usb/serial/Kconfig
+--- linux-2.6.32.noarch/drivers/usb/serial/Kconfig.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/usb/serial/Kconfig 2010-04-01 12:52:26.989997164 -0400
+@@ -565,8 +565,12 @@ config USB_SERIAL_XIRCOM
+ To compile this driver as a module, choose M here: the
+ module will be called keyspan_pda.
+
++config USB_SERIAL_WWAN
++ tristate
++
+ config USB_SERIAL_OPTION
+ tristate "USB driver for GSM and CDMA modems"
++ select USB_SERIAL_WWAN
+ help
+ Say Y here if you have a GSM or CDMA modem that's connected to USB.
+
+diff -up linux-2.6.32.noarch/drivers/usb/serial/Makefile.orig linux-2.6.32.noarch/drivers/usb/serial/Makefile
+--- linux-2.6.32.noarch/drivers/usb/serial/Makefile.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/usb/serial/Makefile 2010-04-01 12:52:26.992996185 -0400
+@@ -51,6 +51,7 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) +=
+ obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
+ obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
+ obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o
++obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o
+ obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
+ obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
+ obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
+diff -up linux-2.6.32.noarch/drivers/usb/serial/option.c.orig linux-2.6.32.noarch/drivers/usb/serial/option.c
+--- linux-2.6.32.noarch/drivers/usb/serial/option.c.orig 2010-04-01 12:51:42.346995579 -0400
++++ linux-2.6.32.noarch/drivers/usb/serial/option.c 2010-04-01 12:53:47.537995720 -0400
+@@ -41,35 +41,14 @@
+ #include <linux/bitops.h>
+ #include <linux/usb.h>
+ #include <linux/usb/serial.h>
++#include "usb-wwan.h"
+
+ /* Function prototypes */
+ static int option_probe(struct usb_serial *serial,
+ const struct usb_device_id *id);
+-static int option_open(struct tty_struct *tty, struct usb_serial_port *port);
+-static void option_close(struct usb_serial_port *port);
+-static void option_dtr_rts(struct usb_serial_port *port, int on);
+-
+-static int option_startup(struct usb_serial *serial);
+-static void option_disconnect(struct usb_serial *serial);
+-static void option_release(struct usb_serial *serial);
+-static int option_write_room(struct tty_struct *tty);
+-
++static int option_send_setup(struct usb_serial_port *port);
+ static void option_instat_callback(struct urb *urb);
+
+-static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
+- const unsigned char *buf, int count);
+-static int option_chars_in_buffer(struct tty_struct *tty);
+-static void option_set_termios(struct tty_struct *tty,
+- struct usb_serial_port *port, struct ktermios *old);
+-static int option_tiocmget(struct tty_struct *tty, struct file *file);
+-static int option_tiocmset(struct tty_struct *tty, struct file *file,
+- unsigned int set, unsigned int clear);
+-static int option_send_setup(struct usb_serial_port *port);
+-#ifdef CONFIG_PM
+-static int option_suspend(struct usb_serial *serial, pm_message_t message);
+-static int option_resume(struct usb_serial *serial);
+-#endif
+-
+ /* Vendor and product IDs */
+ #define OPTION_VENDOR_ID 0x0AF0
+ #define OPTION_PRODUCT_COLT 0x5000
+@@ -677,22 +656,22 @@ static struct usb_serial_driver option_1
+ .id_table = option_ids,
+ .num_ports = 1,
+ .probe = option_probe,
+- .open = option_open,
+- .close = option_close,
+- .dtr_rts = option_dtr_rts,
+- .write = option_write,
+- .write_room = option_write_room,
+- .chars_in_buffer = option_chars_in_buffer,
+- .set_termios = option_set_termios,
+- .tiocmget = option_tiocmget,
+- .tiocmset = option_tiocmset,
+- .attach = option_startup,
+- .disconnect = option_disconnect,
+- .release = option_release,
++ .open = usb_wwan_open,
++ .close = usb_wwan_close,
++ .dtr_rts = usb_wwan_dtr_rts,
++ .write = usb_wwan_write,
++ .write_room = usb_wwan_write_room,
++ .chars_in_buffer = usb_wwan_chars_in_buffer,
++ .set_termios = usb_wwan_set_termios,
++ .tiocmget = usb_wwan_tiocmget,
++ .tiocmset = usb_wwan_tiocmset,
++ .attach = usb_wwan_startup,
++ .disconnect = usb_wwan_disconnect,
++ .release = usb_wwan_release,
+ .read_int_callback = option_instat_callback,
+ #ifdef CONFIG_PM
+- .suspend = option_suspend,
+- .resume = option_resume,
++ .suspend = usb_wwan_suspend,
++ .resume = usb_wwan_resume,
+ #endif
+ };
+
+@@ -705,12 +684,6 @@ static int debug;
+ #define IN_BUFLEN 4096
+ #define OUT_BUFLEN 4096
+
+-struct option_intf_private {
+- spinlock_t susp_lock;
+- unsigned int suspended:1;
+- int in_flight;
+-};
+-
+ struct option_port_private {
+ /* Input endpoints and buffer for this port */
+ struct urb *in_urbs[N_IN_URB];
+@@ -767,216 +740,28 @@ module_exit(option_exit);
+ static int option_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+ {
+- struct option_intf_private *data;
++ struct usb_wwan_intf_private *data;
+
+ /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
+ if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
+ serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
+ serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
+ return -ENODEV;
+
+ /* Bandrich modem and AT command interface is 0xff */
+ if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID ||
+ serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) &&
+ serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
+ return -ENODEV;
+
+- data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
++ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
++ data->send_setup = option_send_setup;
+ spin_lock_init(&data->susp_lock);
+ return 0;
+ }
+
+-static void option_set_termios(struct tty_struct *tty,
+- struct usb_serial_port *port, struct ktermios *old_termios)
+-{
+- dbg("%s", __func__);
+- /* Doesn't support option setting */
+- tty_termios_copy_hw(tty->termios, old_termios);
+- option_send_setup(port);
+-}
+-
+-static int option_tiocmget(struct tty_struct *tty, struct file *file)
+-{
+- struct usb_serial_port *port = tty->driver_data;
+- unsigned int value;
+- struct option_port_private *portdata;
+-
+- portdata = usb_get_serial_port_data(port);
+-
+- value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
+- ((portdata->dtr_state) ? TIOCM_DTR : 0) |
+- ((portdata->cts_state) ? TIOCM_CTS : 0) |
+- ((portdata->dsr_state) ? TIOCM_DSR : 0) |
+- ((portdata->dcd_state) ? TIOCM_CAR : 0) |
+- ((portdata->ri_state) ? TIOCM_RNG : 0);
+-
+- return value;
+-}
+-
+-static int option_tiocmset(struct tty_struct *tty, struct file *file,
+- unsigned int set, unsigned int clear)
+-{
+- struct usb_serial_port *port = tty->driver_data;
+- struct option_port_private *portdata;
+-
+- portdata = usb_get_serial_port_data(port);
+-
+- /* FIXME: what locks portdata fields ? */
+- if (set & TIOCM_RTS)
+- portdata->rts_state = 1;
+- if (set & TIOCM_DTR)
+- portdata->dtr_state = 1;
+-
+- if (clear & TIOCM_RTS)
+- portdata->rts_state = 0;
+- if (clear & TIOCM_DTR)
+- portdata->dtr_state = 0;
+- return option_send_setup(port);
+-}
+-
+-/* Write */
+-static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
+- const unsigned char *buf, int count)
+-{
+- struct option_port_private *portdata;
+- struct option_intf_private *intfdata;
+- int i;
+- int left, todo;
+- struct urb *this_urb = NULL; /* spurious */
+- int err;
+- unsigned long flags;
+-
+- portdata = usb_get_serial_port_data(port);
+- intfdata = port->serial->private;
+-
+- dbg("%s: write (%d chars)", __func__, count);
+-
+- i = 0;
+- left = count;
+- for (i = 0; left > 0 && i < N_OUT_URB; i++) {
+- todo = left;
+- if (todo > OUT_BUFLEN)
+- todo = OUT_BUFLEN;
+-
+- this_urb = portdata->out_urbs[i];
+- if (test_and_set_bit(i, &portdata->out_busy)) {
+- if (time_before(jiffies,
+- portdata->tx_start_time[i] + 10 * HZ))
+- continue;
+- usb_unlink_urb(this_urb);
+- continue;
+- }
+- dbg("%s: endpoint %d buf %d", __func__,
+- usb_pipeendpoint(this_urb->pipe), i);
+-
+- err = usb_autopm_get_interface_async(port->serial->interface);
+- if (err < 0)
+- break;
+-
+- /* send the data */
+- memcpy(this_urb->transfer_buffer, buf, todo);
+- this_urb->transfer_buffer_length = todo;
+-
+- spin_lock_irqsave(&intfdata->susp_lock, flags);
+- if (intfdata->suspended) {
+- usb_anchor_urb(this_urb, &portdata->delayed);
+- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+- } else {
+- intfdata->in_flight++;
+- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+- err = usb_submit_urb(this_urb, GFP_ATOMIC);
+- if (err) {
+- dbg("usb_submit_urb %p (write bulk) failed "
+- "(%d)", this_urb, err);
+- clear_bit(i, &portdata->out_busy);
+- spin_lock_irqsave(&intfdata->susp_lock, flags);
+- intfdata->in_flight--;
+- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+- continue;
+- }
+- }
+-
+- portdata->tx_start_time[i] = jiffies;
+- buf += todo;
+- left -= todo;
+- }
+-
+- count -= left;
+- dbg("%s: wrote (did %d)", __func__, count);
+- return count;
+-}
+-
+-static void option_indat_callback(struct urb *urb)
+-{
+- int err;
+- int endpoint;
+- struct usb_serial_port *port;
+- struct tty_struct *tty;
+- unsigned char *data = urb->transfer_buffer;
+- int status = urb->status;
+-
+- dbg("%s: %p", __func__, urb);
+-
+- endpoint = usb_pipeendpoint(urb->pipe);
+- port = urb->context;
+-
+- if (status) {
+- dbg("%s: nonzero status: %d on endpoint %02x.",
+- __func__, status, endpoint);
+- } else {
+- tty = tty_port_tty_get(&port->port);
+- if (urb->actual_length) {
+- tty_buffer_request_room(tty, urb->actual_length);
+- tty_insert_flip_string(tty, data, urb->actual_length);
+- tty_flip_buffer_push(tty);
+- } else
+- dbg("%s: empty read urb received", __func__);
+- tty_kref_put(tty);
+-
+- /* Resubmit urb so we continue receiving */
+- if (port->port.count && status != -ESHUTDOWN) {
+- err = usb_submit_urb(urb, GFP_ATOMIC);
+- if (err)
+- printk(KERN_ERR "%s: resubmit read urb failed. "
+- "(%d)", __func__, err);
+- else
+- usb_mark_last_busy(port->serial->dev);
+- }
+-
+- }
+- return;
+-}
+-
+-static void option_outdat_callback(struct urb *urb)
+-{
+- struct usb_serial_port *port;
+- struct option_port_private *portdata;
+- struct option_intf_private *intfdata;
+- int i;
+-
+- dbg("%s", __func__);
+-
+- port = urb->context;
+- intfdata = port->serial->private;
+-
+- usb_serial_port_softint(port);
+- usb_autopm_put_interface_async(port->serial->interface);
+- portdata = usb_get_serial_port_data(port);
+- spin_lock(&intfdata->susp_lock);
+- intfdata->in_flight--;
+- spin_unlock(&intfdata->susp_lock);
+-
+- for (i = 0; i < N_OUT_URB; ++i) {
+- if (portdata->out_urbs[i] == urb) {
+- smp_mb__before_clear_bit();
+- clear_bit(i, &portdata->out_busy);
+- break;
+- }
+- }
+-}
+-
+ static void option_instat_callback(struct urb *urb)
+ {
+ int err;
+@@ -1026,183 +811,6 @@ static void option_instat_callback(struc
+ }
+ }
+
+-static int option_write_room(struct tty_struct *tty)
+-{
+- struct usb_serial_port *port = tty->driver_data;
+- struct option_port_private *portdata;
+- int i;
+- int data_len = 0;
+- struct urb *this_urb;
+-
+- portdata = usb_get_serial_port_data(port);
+-
+- for (i = 0; i < N_OUT_URB; i++) {
+- this_urb = portdata->out_urbs[i];
+- if (this_urb && !test_bit(i, &portdata->out_busy))
+- data_len += OUT_BUFLEN;
+- }
+-
+- dbg("%s: %d", __func__, data_len);
+- return data_len;
+-}
+-
+-static int option_chars_in_buffer(struct tty_struct *tty)
+-{
+- struct usb_serial_port *port = tty->driver_data;
+- struct option_port_private *portdata;
+- int i;
+- int data_len = 0;
+- struct urb *this_urb;
+-
+- portdata = usb_get_serial_port_data(port);
+-
+- for (i = 0; i < N_OUT_URB; i++) {
+- this_urb = portdata->out_urbs[i];
+- /* FIXME: This locking is insufficient as this_urb may
+- go unused during the test */
+- if (this_urb && test_bit(i, &portdata->out_busy))
+- data_len += this_urb->transfer_buffer_length;
+- }
+- dbg("%s: %d", __func__, data_len);
+- return data_len;
+-}
+-
+-static int option_open(struct tty_struct *tty, struct usb_serial_port *port)
+-{
+- struct option_port_private *portdata;
+- struct option_intf_private *intfdata;
+- struct usb_serial *serial = port->serial;
+- int i, err;
+- struct urb *urb;
+-
+- portdata = usb_get_serial_port_data(port);
+- intfdata = serial->private;
+-
+- dbg("%s", __func__);
+-
+- /* Start reading from the IN endpoint */
+- for (i = 0; i < N_IN_URB; i++) {
+- urb = portdata->in_urbs[i];
+- if (!urb)
+- continue;
+- err = usb_submit_urb(urb, GFP_KERNEL);
+- if (err) {
+- dbg("%s: submit urb %d failed (%d) %d",
+- __func__, i, err,
+- urb->transfer_buffer_length);
+- }
+- }
+-
+- option_send_setup(port);
+-
+- serial->interface->needs_remote_wakeup = 1;
+- spin_lock_irq(&intfdata->susp_lock);
+- portdata->opened = 1;
+- spin_unlock_irq(&intfdata->susp_lock);
+- usb_autopm_put_interface(serial->interface);
+-
+- return 0;
+-}
+-
+-static void option_dtr_rts(struct usb_serial_port *port, int on)
+-{
+- struct usb_serial *serial = port->serial;
+- struct option_port_private *portdata;
+-
+- dbg("%s", __func__);
+- portdata = usb_get_serial_port_data(port);
+- mutex_lock(&serial->disc_mutex);
+- portdata->rts_state = on;
+- portdata->dtr_state = on;
+- if (serial->dev)
+- option_send_setup(port);
+- mutex_unlock(&serial->disc_mutex);
+-}
+-
+-
+-static void option_close(struct usb_serial_port *port)
+-{
+- int i;
+- struct usb_serial *serial = port->serial;
+- struct option_port_private *portdata;
+- struct option_intf_private *intfdata = port->serial->private;
+-
+- dbg("%s", __func__);
+- portdata = usb_get_serial_port_data(port);
+-
+- if (serial->dev) {
+- /* Stop reading/writing urbs */
+- spin_lock_irq(&intfdata->susp_lock);
+- portdata->opened = 0;
+- spin_unlock_irq(&intfdata->susp_lock);
+-
+- for (i = 0; i < N_IN_URB; i++)
+- usb_kill_urb(portdata->in_urbs[i]);
+- for (i = 0; i < N_OUT_URB; i++)
+- usb_kill_urb(portdata->out_urbs[i]);
+- usb_autopm_get_interface(serial->interface);
+- serial->interface->needs_remote_wakeup = 0;
+- }
+-}
+-
+-/* Helper functions used by option_setup_urbs */
+-static struct urb *option_setup_urb(struct usb_serial *serial, int endpoint,
+- int dir, void *ctx, char *buf, int len,
+- void (*callback)(struct urb *))
+-{
+- struct urb *urb;
+-
+- if (endpoint == -1)
+- return NULL; /* endpoint not needed */
+-
+- urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
+- if (urb == NULL) {
+- dbg("%s: alloc for endpoint %d failed.", __func__, endpoint);
+- return NULL;
+- }
+-
+- /* Fill URB using supplied data. */
+- usb_fill_bulk_urb(urb, serial->dev,
+- usb_sndbulkpipe(serial->dev, endpoint) | dir,
+- buf, len, callback, ctx);
+-
+- return urb;
+-}
+-
+-/* Setup urbs */
+-static void option_setup_urbs(struct usb_serial *serial)
+-{
+- int i, j;
+- struct usb_serial_port *port;
+- struct option_port_private *portdata;
+-
+- dbg("%s", __func__);
+-
+- for (i = 0; i < serial->num_ports; i++) {
+- port = serial->port[i];
+- portdata = usb_get_serial_port_data(port);
+-
+- /* Do indat endpoints first */
+- for (j = 0; j < N_IN_URB; ++j) {
+- portdata->in_urbs[j] = option_setup_urb(serial,
+- port->bulk_in_endpointAddress,
+- USB_DIR_IN, port,
+- portdata->in_buffer[j],
+- IN_BUFLEN, option_indat_callback);
+- }
+-
+- /* outdat endpoints */
+- for (j = 0; j < N_OUT_URB; ++j) {
+- portdata->out_urbs[j] = option_setup_urb(serial,
+- port->bulk_out_endpointAddress,
+- USB_DIR_OUT, port,
+- portdata->out_buffer[j],
+- OUT_BUFLEN, option_outdat_callback);
+- }
+- }
+-}
+-
+-
+ /** send RTS/DTR state to the port.
+ *
+ * This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN
+@@ -1228,224 +836,6 @@ static int option_send_setup(struct usb_
+ 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ }
+
+-static int option_startup(struct usb_serial *serial)
+-{
+- int i, j, err;
+- struct usb_serial_port *port;
+- struct option_port_private *portdata;
+- u8 *buffer;
+-
+- dbg("%s", __func__);
+-
+- /* Now setup per port private data */
+- for (i = 0; i < serial->num_ports; i++) {
+- port = serial->port[i];
+- portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
+- if (!portdata) {
+- dbg("%s: kmalloc for option_port_private (%d) failed!.",
+- __func__, i);
+- return 1;
+- }
+- init_usb_anchor(&portdata->delayed);
+-
+- for (j = 0; j < N_IN_URB; j++) {
+- buffer = (u8 *)__get_free_page(GFP_KERNEL);
+- if (!buffer)
+- goto bail_out_error;
+- portdata->in_buffer[j] = buffer;
+- }
+-
+- for (j = 0; j < N_OUT_URB; j++) {
+- buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
+- if (!buffer)
+- goto bail_out_error2;
+- portdata->out_buffer[j] = buffer;
+- }
+-
+- usb_set_serial_port_data(port, portdata);
+-
+- if (!port->interrupt_in_urb)
+- continue;
+- err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+- if (err)
+- dbg("%s: submit irq_in urb failed %d",
+- __func__, err);
+- }
+- option_setup_urbs(serial);
+- return 0;
+-
+-bail_out_error2:
+- for (j = 0; j < N_OUT_URB; j++)
+- kfree(portdata->out_buffer[j]);
+-bail_out_error:
+- for (j = 0; j < N_IN_URB; j++)
+- if (portdata->in_buffer[j])
+- free_page((unsigned long)portdata->in_buffer[j]);
+- kfree(portdata);
+- return 1;
+-}
+-
+-static void stop_read_write_urbs(struct usb_serial *serial)
+-{
+- int i, j;
+- struct usb_serial_port *port;
+- struct option_port_private *portdata;
+-
+- /* Stop reading/writing urbs */
+- for (i = 0; i < serial->num_ports; ++i) {
+- port = serial->port[i];
+- portdata = usb_get_serial_port_data(port);
+- for (j = 0; j < N_IN_URB; j++)
+- usb_kill_urb(portdata->in_urbs[j]);
+- for (j = 0; j < N_OUT_URB; j++)
+- usb_kill_urb(portdata->out_urbs[j]);
+- }
+-}
+-
+-static void option_disconnect(struct usb_serial *serial)
+-{
+- dbg("%s", __func__);
+-
+- stop_read_write_urbs(serial);
+-}
+-
+-static void option_release(struct usb_serial *serial)
+-{
+- int i, j;
+- struct usb_serial_port *port;
+- struct option_port_private *portdata;
+-
+- dbg("%s", __func__);
+-
+- /* Now free them */
+- for (i = 0; i < serial->num_ports; ++i) {
+- port = serial->port[i];
+- portdata = usb_get_serial_port_data(port);
+-
+- for (j = 0; j < N_IN_URB; j++) {
+- if (portdata->in_urbs[j]) {
+- usb_free_urb(portdata->in_urbs[j]);
+- free_page((unsigned long)
+- portdata->in_buffer[j]);
+- portdata->in_urbs[j] = NULL;
+- }
+- }
+- for (j = 0; j < N_OUT_URB; j++) {
+- if (portdata->out_urbs[j]) {
+- usb_free_urb(portdata->out_urbs[j]);
+- kfree(portdata->out_buffer[j]);
+- portdata->out_urbs[j] = NULL;
+- }
+- }
+- }
+-
+- /* Now free per port private data */
+- for (i = 0; i < serial->num_ports; i++) {
+- port = serial->port[i];
+- kfree(usb_get_serial_port_data(port));
+- }
+-}
+-
+-#ifdef CONFIG_PM
+-static int option_suspend(struct usb_serial *serial, pm_message_t message)
+-{
+- struct option_intf_private *intfdata = serial->private;
+- int b;
+-
+- dbg("%s entered", __func__);
+-
+- if (serial->dev->auto_pm) {
+- spin_lock_irq(&intfdata->susp_lock);
+- b = intfdata->in_flight;
+- spin_unlock_irq(&intfdata->susp_lock);
+-
+- if (b)
+- return -EBUSY;
+- }
+-
+- spin_lock_irq(&intfdata->susp_lock);
+- intfdata->suspended = 1;
+- spin_unlock_irq(&intfdata->susp_lock);
+- stop_read_write_urbs(serial);
+-
+- return 0;
+-}
+-
+-static void play_delayed(struct usb_serial_port *port)
+-{
+- struct option_intf_private *data;
+- struct option_port_private *portdata;
+- struct urb *urb;
+- int err;
+-
+- portdata = usb_get_serial_port_data(port);
+- data = port->serial->private;
+- while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+- err = usb_submit_urb(urb, GFP_ATOMIC);
+- if (!err)
+- data->in_flight++;
+- }
+-}
+-
+-static int option_resume(struct usb_serial *serial)
+-{
+- int i, j;
+- struct usb_serial_port *port;
+- struct option_intf_private *intfdata = serial->private;
+- struct option_port_private *portdata;
+- struct urb *urb;
+- int err = 0;
+-
+- dbg("%s entered", __func__);
+- /* get the interrupt URBs resubmitted unconditionally */
+- for (i = 0; i < serial->num_ports; i++) {
+- port = serial->port[i];
+- if (!port->interrupt_in_urb) {
+- dbg("%s: No interrupt URB for port %d\n", __func__, i);
+- continue;
+- }
+- err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+- dbg("Submitted interrupt URB for port %d (result %d)", i, err);
+- if (err < 0) {
+- err("%s: Error %d for interrupt URB of port%d",
+- __func__, err, i);
+- goto err_out;
+- }
+- }
+-
+- for (i = 0; i < serial->num_ports; i++) {
+- /* walk all ports */
+- port = serial->port[i];
+- portdata = usb_get_serial_port_data(port);
+-
+- /* skip closed ports */
+- spin_lock_irq(&intfdata->susp_lock);
+- if (!portdata->opened) {
+- spin_unlock_irq(&intfdata->susp_lock);
+- continue;
+- }
+-
+- for (j = 0; j < N_IN_URB; j++) {
+- urb = portdata->in_urbs[j];
+- err = usb_submit_urb(urb, GFP_ATOMIC);
+- if (err < 0) {
+- err("%s: Error %d for bulk URB %d",
+- __func__, err, i);
+- spin_unlock_irq(&intfdata->susp_lock);
+- goto err_out;
+- }
+- }
+- play_delayed(port);
+- spin_unlock_irq(&intfdata->susp_lock);
+- }
+- spin_lock_irq(&intfdata->susp_lock);
+- intfdata->suspended = 0;
+- spin_unlock_irq(&intfdata->susp_lock);
+-err_out:
+- return err;
+-}
+-#endif
+-
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_VERSION(DRIVER_VERSION);
+diff -up linux-2.6.32.noarch/drivers/usb/serial/qcserial.c.orig linux-2.6.32.noarch/drivers/usb/serial/qcserial.c
+--- linux-2.6.32.noarch/drivers/usb/serial/qcserial.c.orig 2009-12-02 22:51:21.000000000 -0500
++++ linux-2.6.32.noarch/drivers/usb/serial/qcserial.c 2010-04-01 12:52:26.997995807 -0400
+@@ -15,13 +15,14 @@
+ #include <linux/tty_flip.h>
+ #include <linux/usb.h>
+ #include <linux/usb/serial.h>
++#include "usb-wwan.h"
+
+ #define DRIVER_AUTHOR "Qualcomm Inc"
+ #define DRIVER_DESC "Qualcomm USB Serial driver"
+
+ static int debug;
+
+-static struct usb_device_id id_table[] = {
++static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
+ {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+ {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
+@@ -47,6 +48,37 @@ static struct usb_device_id id_table[] =
+ {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
+ {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
+ {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
++ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
++ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
++ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
++ {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
++ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
++ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
++ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
++ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
++ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
++ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
++ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
++ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
++ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
++ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
++ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
++ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
++ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
++ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
++ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
++ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
++ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
++ {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+@@ -63,6 +95,8 @@ static struct usb_driver qcdriver = {
+
+ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ {
++ struct usb_wwan_intf_private *data;
++ struct usb_host_interface *intf = serial->interface->cur_altsetting;
+ int retval = -ENODEV;
+ __u8 nintf;
+ __u8 ifnum;
+@@ -71,33 +105,45 @@ static int qcprobe(struct usb_serial *se
+
+ nintf = serial->dev->actconfig->desc.bNumInterfaces;
+ dbg("Num Interfaces = %d", nintf);
+- ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
++ ifnum = intf->desc.bInterfaceNumber;
+ dbg("This Interface = %d", ifnum);
+
++ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
++ GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ spin_lock_init(&data->susp_lock);
++
+ switch (nintf) {
+ case 1:
+ /* QDL mode */
+- if (serial->interface->num_altsetting == 2) {
+- struct usb_host_interface *intf;
+-
++ /* Gobi 2000 has a single altsetting, older ones have two */
++ if (serial->interface->num_altsetting == 2)
+ intf = &serial->interface->altsetting[1];
+- if (intf->desc.bNumEndpoints == 2) {
+- if (usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
+- usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
+- dbg("QDL port found");
+- retval = usb_set_interface(serial->dev, ifnum, 1);
+- if (retval < 0) {
+- dev_err(&serial->dev->dev,
+- "Could not set interface, error %d\n",
+- retval);
+- retval = -ENODEV;
+- }
+- return retval;
+- }
++ else if (serial->interface->num_altsetting > 2)
++ break;
++
++ if (intf->desc.bNumEndpoints == 2 &&
++ usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
++ usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
++ dbg("QDL port found");
++
++ if (serial->interface->num_altsetting == 1)
++ return 0;
++
++ retval = usb_set_interface(serial->dev, ifnum, 1);
++ if (retval < 0) {
++ dev_err(&serial->dev->dev,
++ "Could not set interface, error %d\n",
++ retval);
++ retval = -ENODEV;
+ }
++ return retval;
+ }
+ break;
+
++ case 3:
+ case 4:
+ /* Composite mode */
+ if (ifnum == 2) {
+@@ -132,6 +178,18 @@ static struct usb_serial_driver qcdevice
+ .usb_driver = &qcdriver,
+ .num_ports = 1,
+ .probe = qcprobe,
++ .open = usb_wwan_open,
++ .close = usb_wwan_close,
++ .write = usb_wwan_write,
++ .write_room = usb_wwan_write_room,
++ .chars_in_buffer = usb_wwan_chars_in_buffer,
++ .attach = usb_wwan_startup,
++ .disconnect = usb_wwan_disconnect,
++ .release = usb_wwan_release,
++#ifdef CONFIG_PM
++ .suspend = usb_wwan_suspend,
++ .resume = usb_wwan_resume,
++#endif
+ };
+
+ static int __init qcinit(void)
+diff -up linux-2.6.32.noarch/drivers/usb/serial/usb_wwan.c.orig linux-2.6.32.noarch/drivers/usb/serial/usb_wwan.c
+--- linux-2.6.32.noarch/drivers/usb/serial/usb_wwan.c.orig 2010-04-01 12:52:26.999995712 -0400
++++ linux-2.6.32.noarch/drivers/usb/serial/usb_wwan.c 2010-04-01 12:52:26.998995550 -0400
+@@ -0,0 +1,665 @@
++/*
++ USB Driver layer for GSM modems
++
++ Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de>
++
++ This driver is free software; you can redistribute it and/or modify
++ it under the terms of Version 2 of the GNU General Public License as
++ published by the Free Software Foundation.
++
++ Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>
++
++ History: see the git log.
++
++ Work sponsored by: Sigos GmbH, Germany <info@sigos.de>
++
++ This driver exists because the "normal" serial driver doesn't work too well
++ with GSM modems. Issues:
++ - data loss -- one single Receive URB is not nearly enough
++ - controlling the baud rate doesn't make sense
++*/
++
++#define DRIVER_VERSION "v0.7.2"
++#define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>"
++#define DRIVER_DESC "USB Driver for GSM modems"
++
++#include <linux/kernel.h>
++#include <linux/jiffies.h>
++#include <linux/errno.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/module.h>
++#include <linux/bitops.h>
++#include <linux/usb.h>
++#include <linux/usb/serial.h>
++#include "usb-wwan.h"
++
++static int debug;
++
++void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
++{
++ struct usb_serial *serial = port->serial;
++ struct usb_wwan_port_private *portdata;
++
++ struct usb_wwan_intf_private *intfdata;
++
++ dbg("%s", __func__);
++
++ intfdata = port->serial->private;
++
++ if (!intfdata->send_setup)
++ return;
++
++ portdata = usb_get_serial_port_data(port);
++ mutex_lock(&serial->disc_mutex);
++ portdata->rts_state = on;
++ portdata->dtr_state = on;
++ if (serial->dev)
++ intfdata->send_setup(port);
++ mutex_unlock(&serial->disc_mutex);
++}
++EXPORT_SYMBOL(usb_wwan_dtr_rts);
++
++void usb_wwan_set_termios(struct tty_struct *tty,
++ struct usb_serial_port *port,
++ struct ktermios *old_termios)
++{
++ struct usb_wwan_intf_private *intfdata = port->serial->private;
++
++ dbg("%s", __func__);
++
++ /* Doesn't support option setting */
++ tty_termios_copy_hw(tty->termios, old_termios);
++
++ if (intfdata->send_setup)
++ intfdata->send_setup(port);
++}
++EXPORT_SYMBOL(usb_wwan_set_termios);
++
++int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file)
++{
++ struct usb_serial_port *port = tty->driver_data;
++ unsigned int value;
++ struct usb_wwan_port_private *portdata;
++
++ portdata = usb_get_serial_port_data(port);
++
++ value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
++ ((portdata->dtr_state) ? TIOCM_DTR : 0) |
++ ((portdata->cts_state) ? TIOCM_CTS : 0) |
++ ((portdata->dsr_state) ? TIOCM_DSR : 0) |
++ ((portdata->dcd_state) ? TIOCM_CAR : 0) |
++ ((portdata->ri_state) ? TIOCM_RNG : 0);
++
++ return value;
++}
++EXPORT_SYMBOL(usb_wwan_tiocmget);
++
++int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
++ unsigned int set, unsigned int clear)
++{
++ struct usb_serial_port *port = tty->driver_data;
++ struct usb_wwan_port_private *portdata;
++ struct usb_wwan_intf_private *intfdata;
++
++ portdata = usb_get_serial_port_data(port);
++ intfdata = port->serial->private;
++
++ if (!intfdata->send_setup)
++ return -EINVAL;
++
++ /* FIXME: what locks portdata fields ? */
++ if (set & TIOCM_RTS)
++ portdata->rts_state = 1;
++ if (set & TIOCM_DTR)
++ portdata->dtr_state = 1;
++
++ if (clear & TIOCM_RTS)
++ portdata->rts_state = 0;
++ if (clear & TIOCM_DTR)
++ portdata->dtr_state = 0;
++ return intfdata->send_setup(port);
++}
++EXPORT_SYMBOL(usb_wwan_tiocmset);
++
++/* Write */
++int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
++ const unsigned char *buf, int count)
++{
++ struct usb_wwan_port_private *portdata;
++ struct usb_wwan_intf_private *intfdata;
++ int i;
++ int left, todo;
++ struct urb *this_urb = NULL; /* spurious */
++ int err;
++ unsigned long flags;
++
++ portdata = usb_get_serial_port_data(port);
++ intfdata = port->serial->private;
++
++ dbg("%s: write (%d chars)", __func__, count);
++
++ i = 0;
++ left = count;
++ for (i = 0; left > 0 && i < N_OUT_URB; i++) {
++ todo = left;
++ if (todo > OUT_BUFLEN)
++ todo = OUT_BUFLEN;
++
++ this_urb = portdata->out_urbs[i];
++ if (test_and_set_bit(i, &portdata->out_busy)) {
++ if (time_before(jiffies,
++ portdata->tx_start_time[i] + 10 * HZ))
++ continue;
++ usb_unlink_urb(this_urb);
++ continue;
++ }
++ dbg("%s: endpoint %d buf %d", __func__,
++ usb_pipeendpoint(this_urb->pipe), i);
++
++ err = usb_autopm_get_interface_async(port->serial->interface);
++ if (err < 0)
++ break;
++
++ /* send the data */
++ memcpy(this_urb->transfer_buffer, buf, todo);
++ this_urb->transfer_buffer_length = todo;
++
++ spin_lock_irqsave(&intfdata->susp_lock, flags);
++ if (intfdata->suspended) {
++ usb_anchor_urb(this_urb, &portdata->delayed);
++ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
++ } else {
++ intfdata->in_flight++;
++ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
++ err = usb_submit_urb(this_urb, GFP_ATOMIC);
++ if (err) {
++ dbg("usb_submit_urb %p (write bulk) failed "
++ "(%d)", this_urb, err);
++ clear_bit(i, &portdata->out_busy);
++ spin_lock_irqsave(&intfdata->susp_lock, flags);
++ intfdata->in_flight--;
++ spin_unlock_irqrestore(&intfdata->susp_lock,
++ flags);
++ continue;
++ }
++ }
++
++ portdata->tx_start_time[i] = jiffies;
++ buf += todo;
++ left -= todo;
++ }
++
++ count -= left;
++ dbg("%s: wrote (did %d)", __func__, count);
++ return count;
++}
++EXPORT_SYMBOL(usb_wwan_write);
++
++static void usb_wwan_indat_callback(struct urb *urb)
++{
++ int err;
++ int endpoint;
++ struct usb_serial_port *port;
++ struct tty_struct *tty;
++ unsigned char *data = urb->transfer_buffer;
++ int status = urb->status;
++
++ dbg("%s: %p", __func__, urb);
++
++ endpoint = usb_pipeendpoint(urb->pipe);
++ port = urb->context;
++
++ if (status) {
++ dbg("%s: nonzero status: %d on endpoint %02x.",
++ __func__, status, endpoint);
++ } else {
++ tty = tty_port_tty_get(&port->port);
++ if (urb->actual_length) {
++ tty_buffer_request_room(tty, urb->actual_length);
++ tty_insert_flip_string(tty, data, urb->actual_length);
++ tty_flip_buffer_push(tty);
++ } else
++ dbg("%s: empty read urb received", __func__);
++ tty_kref_put(tty);
++
++ /* Resubmit urb so we continue receiving */
++ if (port->port.count && status != -ESHUTDOWN) {
++ err = usb_submit_urb(urb, GFP_ATOMIC);
++ if (err)
++ printk(KERN_ERR "%s: resubmit read urb failed. "
++ "(%d)", __func__, err);
++ else
++ usb_mark_last_busy(port->serial->dev);
++ }
++
++ }
++ return;
++}
++
++static void usb_wwan_outdat_callback(struct urb *urb)
++{
++ struct usb_serial_port *port;
++ struct usb_wwan_port_private *portdata;
++ struct usb_wwan_intf_private *intfdata;
++ int i;
++
++ dbg("%s", __func__);
++
++ port = urb->context;
++ intfdata = port->serial->private;
++
++ usb_serial_port_softint(port);
++ usb_autopm_put_interface_async(port->serial->interface);
++ portdata = usb_get_serial_port_data(port);
++ spin_lock(&intfdata->susp_lock);
++ intfdata->in_flight--;
++ spin_unlock(&intfdata->susp_lock);
++
++ for (i = 0; i < N_OUT_URB; ++i) {
++ if (portdata->out_urbs[i] == urb) {
++ smp_mb__before_clear_bit();
++ clear_bit(i, &portdata->out_busy);
++ break;
++ }
++ }
++}
++
++int usb_wwan_write_room(struct tty_struct *tty)
++{
++ struct usb_serial_port *port = tty->driver_data;
++ struct usb_wwan_port_private *portdata;
++ int i;
++ int data_len = 0;
++ struct urb *this_urb;
++
++ portdata = usb_get_serial_port_data(port);
++
++ for (i = 0; i < N_OUT_URB; i++) {
++ this_urb = portdata->out_urbs[i];
++ if (this_urb && !test_bit(i, &portdata->out_busy))
++ data_len += OUT_BUFLEN;
++ }
++
++ dbg("%s: %d", __func__, data_len);
++ return data_len;
++}
++EXPORT_SYMBOL(usb_wwan_write_room);
++
++int usb_wwan_chars_in_buffer(struct tty_struct *tty)
++{
++ struct usb_serial_port *port = tty->driver_data;
++ struct usb_wwan_port_private *portdata;
++ int i;
++ int data_len = 0;
++ struct urb *this_urb;
++
++ portdata = usb_get_serial_port_data(port);
++
++ for (i = 0; i < N_OUT_URB; i++) {
++ this_urb = portdata->out_urbs[i];
++ /* FIXME: This locking is insufficient as this_urb may
++ go unused during the test */
++ if (this_urb && test_bit(i, &portdata->out_busy))
++ data_len += this_urb->transfer_buffer_length;
++ }
++ dbg("%s: %d", __func__, data_len);
++ return data_len;
++}
++EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
++
++int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
++{
++ struct usb_wwan_port_private *portdata;
++ struct usb_wwan_intf_private *intfdata;
++ struct usb_serial *serial = port->serial;
++ int i, err;
++ struct urb *urb;
++
++ portdata = usb_get_serial_port_data(port);
++ intfdata = serial->private;
++
++ dbg("%s", __func__);
++
++ /* Start reading from the IN endpoint */
++ for (i = 0; i < N_IN_URB; i++) {
++ urb = portdata->in_urbs[i];
++ if (!urb)
++ continue;
++ err = usb_submit_urb(urb, GFP_KERNEL);
++ if (err) {
++ dbg("%s: submit urb %d failed (%d) %d",
++ __func__, i, err, urb->transfer_buffer_length);
++ }
++ }
++
++ if (intfdata->send_setup)
++ intfdata->send_setup(port);
++
++ serial->interface->needs_remote_wakeup = 1;
++ spin_lock_irq(&intfdata->susp_lock);
++ portdata->opened = 1;
++ spin_unlock_irq(&intfdata->susp_lock);
++ usb_autopm_put_interface(serial->interface);
++
++ return 0;
++}
++EXPORT_SYMBOL(usb_wwan_open);
++
++void usb_wwan_close(struct usb_serial_port *port)
++{
++ int i;
++ struct usb_serial *serial = port->serial;
++ struct usb_wwan_port_private *portdata;
++ struct usb_wwan_intf_private *intfdata = port->serial->private;
++
++ dbg("%s", __func__);
++ portdata = usb_get_serial_port_data(port);
++
++ if (serial->dev) {
++ /* Stop reading/writing urbs */
++ spin_lock_irq(&intfdata->susp_lock);
++ portdata->opened = 0;
++ spin_unlock_irq(&intfdata->susp_lock);
++
++ for (i = 0; i < N_IN_URB; i++)
++ usb_kill_urb(portdata->in_urbs[i]);
++ for (i = 0; i < N_OUT_URB; i++)
++ usb_kill_urb(portdata->out_urbs[i]);
++ usb_autopm_get_interface(serial->interface);
++ serial->interface->needs_remote_wakeup = 0;
++ }
++}
++EXPORT_SYMBOL(usb_wwan_close);
++
++/* Helper functions used by usb_wwan_setup_urbs */
++static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint,
++ int dir, void *ctx, char *buf, int len,
++ void (*callback) (struct urb *))
++{
++ struct urb *urb;
++
++ if (endpoint == -1)
++ return NULL; /* endpoint not needed */
++
++ urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
++ if (urb == NULL) {
++ dbg("%s: alloc for endpoint %d failed.", __func__, endpoint);
++ return NULL;
++ }
++
++ /* Fill URB using supplied data. */
++ usb_fill_bulk_urb(urb, serial->dev,
++ usb_sndbulkpipe(serial->dev, endpoint) | dir,
++ buf, len, callback, ctx);
++
++ return urb;
++}
++
++/* Setup urbs */
++static void usb_wwan_setup_urbs(struct usb_serial *serial)
++{
++ int i, j;
++ struct usb_serial_port *port;
++ struct usb_wwan_port_private *portdata;
++
++ dbg("%s", __func__);
++
++ for (i = 0; i < serial->num_ports; i++) {
++ port = serial->port[i];
++ portdata = usb_get_serial_port_data(port);
++
++ /* Do indat endpoints first */
++ for (j = 0; j < N_IN_URB; ++j) {
++			portdata->in_urbs[j] = usb_wwan_setup_urb(serial,
++					port->bulk_in_endpointAddress,
++					USB_DIR_IN, port,
++					portdata->in_buffer[j],
++					IN_BUFLEN, usb_wwan_indat_callback);
++ }
++
++ /* outdat endpoints */
++ for (j = 0; j < N_OUT_URB; ++j) {
++			portdata->out_urbs[j] = usb_wwan_setup_urb(serial,
++					port->bulk_out_endpointAddress,
++					USB_DIR_OUT, port,
++					portdata->out_buffer[j],
++					OUT_BUFLEN, usb_wwan_outdat_callback);
++ }
++ }
++}
++
++int usb_wwan_startup(struct usb_serial *serial)
++{
++ int i, j, err;
++ struct usb_serial_port *port;
++ struct usb_wwan_port_private *portdata;
++ u8 *buffer;
++
++ dbg("%s", __func__);
++
++ /* Now setup per port private data */
++ for (i = 0; i < serial->num_ports; i++) {
++ port = serial->port[i];
++ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
++ if (!portdata) {
++ dbg("%s: kmalloc for usb_wwan_port_private (%d) failed!.",
++ __func__, i);
++ return 1;
++ }
++ init_usb_anchor(&portdata->delayed);
++
++ for (j = 0; j < N_IN_URB; j++) {
++ buffer = (u8 *) __get_free_page(GFP_KERNEL);
++ if (!buffer)
++ goto bail_out_error;
++ portdata->in_buffer[j] = buffer;
++ }
++
++ for (j = 0; j < N_OUT_URB; j++) {
++ buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
++ if (!buffer)
++ goto bail_out_error2;
++ portdata->out_buffer[j] = buffer;
++ }
++
++ usb_set_serial_port_data(port, portdata);
++
++ if (!port->interrupt_in_urb)
++ continue;
++ err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
++ if (err)
++ dbg("%s: submit irq_in urb failed %d", __func__, err);
++ }
++ usb_wwan_setup_urbs(serial);
++ return 0;
++
++bail_out_error2:
++ for (j = 0; j < N_OUT_URB; j++)
++ kfree(portdata->out_buffer[j]);
++bail_out_error:
++ for (j = 0; j < N_IN_URB; j++)
++ if (portdata->in_buffer[j])
++ free_page((unsigned long)portdata->in_buffer[j]);
++ kfree(portdata);
++ return 1;
++}
++EXPORT_SYMBOL(usb_wwan_startup);
++
++static void stop_read_write_urbs(struct usb_serial *serial)
++{
++ int i, j;
++ struct usb_serial_port *port;
++ struct usb_wwan_port_private *portdata;
++
++ /* Stop reading/writing urbs */
++ for (i = 0; i < serial->num_ports; ++i) {
++ port = serial->port[i];
++ portdata = usb_get_serial_port_data(port);
++ for (j = 0; j < N_IN_URB; j++)
++ usb_kill_urb(portdata->in_urbs[j]);
++ for (j = 0; j < N_OUT_URB; j++)
++ usb_kill_urb(portdata->out_urbs[j]);
++ }
++}
++
++void usb_wwan_disconnect(struct usb_serial *serial)
++{
++ dbg("%s", __func__);
++
++ stop_read_write_urbs(serial);
++}
++EXPORT_SYMBOL(usb_wwan_disconnect);
++
++void usb_wwan_release(struct usb_serial *serial)
++{
++ int i, j;
++ struct usb_serial_port *port;
++ struct usb_wwan_port_private *portdata;
++
++ dbg("%s", __func__);
++
++ /* Now free them */
++ for (i = 0; i < serial->num_ports; ++i) {
++ port = serial->port[i];
++ portdata = usb_get_serial_port_data(port);
++
++ for (j = 0; j < N_IN_URB; j++) {
++ usb_free_urb(portdata->in_urbs[j]);
++ free_page((unsigned long)
++ portdata->in_buffer[j]);
++ portdata->in_urbs[j] = NULL;
++ }
++ for (j = 0; j < N_OUT_URB; j++) {
++ usb_free_urb(portdata->out_urbs[j]);
++ kfree(portdata->out_buffer[j]);
++ portdata->out_urbs[j] = NULL;
++ }
++ }
++
++ /* Now free per port private data */
++ for (i = 0; i < serial->num_ports; i++) {
++ port = serial->port[i];
++ kfree(usb_get_serial_port_data(port));
++ }
++}
++EXPORT_SYMBOL(usb_wwan_release);
++
++#ifdef CONFIG_PM
++int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
++{
++ struct usb_wwan_intf_private *intfdata = serial->private;
++ int b;
++
++ dbg("%s entered", __func__);
++
++ if (message.event & PM_EVENT_AUTO) {
++ spin_lock_irq(&intfdata->susp_lock);
++ b = intfdata->in_flight;
++ spin_unlock_irq(&intfdata->susp_lock);
++
++ if (b)
++ return -EBUSY;
++ }
++
++ spin_lock_irq(&intfdata->susp_lock);
++ intfdata->suspended = 1;
++ spin_unlock_irq(&intfdata->susp_lock);
++ stop_read_write_urbs(serial);
++
++ return 0;
++}
++EXPORT_SYMBOL(usb_wwan_suspend);
++
++static void play_delayed(struct usb_serial_port *port)
++{
++ struct usb_wwan_intf_private *data;
++ struct usb_wwan_port_private *portdata;
++ struct urb *urb;
++ int err;
++
++ portdata = usb_get_serial_port_data(port);
++ data = port->serial->private;
++ while ((urb = usb_get_from_anchor(&portdata->delayed))) {
++ err = usb_submit_urb(urb, GFP_ATOMIC);
++ if (!err)
++ data->in_flight++;
++ }
++}
++
++int usb_wwan_resume(struct usb_serial *serial)
++{
++ int i, j;
++ struct usb_serial_port *port;
++ struct usb_wwan_intf_private *intfdata = serial->private;
++ struct usb_wwan_port_private *portdata;
++ struct urb *urb;
++ int err = 0;
++
++ dbg("%s entered", __func__);
++ /* get the interrupt URBs resubmitted unconditionally */
++ for (i = 0; i < serial->num_ports; i++) {
++ port = serial->port[i];
++ if (!port->interrupt_in_urb) {
++ dbg("%s: No interrupt URB for port %d\n", __func__, i);
++ continue;
++ }
++ err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
++ dbg("Submitted interrupt URB for port %d (result %d)", i, err);
++ if (err < 0) {
++ err("%s: Error %d for interrupt URB of port%d",
++ __func__, err, i);
++ goto err_out;
++ }
++ }
++
++ for (i = 0; i < serial->num_ports; i++) {
++ /* walk all ports */
++ port = serial->port[i];
++ portdata = usb_get_serial_port_data(port);
++
++ /* skip closed ports */
++ spin_lock_irq(&intfdata->susp_lock);
++ if (!portdata->opened) {
++ spin_unlock_irq(&intfdata->susp_lock);
++ continue;
++ }
++
++ for (j = 0; j < N_IN_URB; j++) {
++ urb = portdata->in_urbs[j];
++ err = usb_submit_urb(urb, GFP_ATOMIC);
++ if (err < 0) {
++ err("%s: Error %d for bulk URB %d",
++ __func__, err, i);
++ spin_unlock_irq(&intfdata->susp_lock);
++ goto err_out;
++ }
++ }
++ play_delayed(port);
++ spin_unlock_irq(&intfdata->susp_lock);
++ }
++ spin_lock_irq(&intfdata->susp_lock);
++ intfdata->suspended = 0;
++ spin_unlock_irq(&intfdata->susp_lock);
++err_out:
++ return err;
++}
++EXPORT_SYMBOL(usb_wwan_resume);
++#endif
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_VERSION(DRIVER_VERSION);
++MODULE_LICENSE("GPL");
++
++module_param(debug, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(debug, "Debug messages");
+diff -up linux-2.6.32.noarch/drivers/usb/serial/usb-wwan.h.orig linux-2.6.32.noarch/drivers/usb/serial/usb-wwan.h
+--- linux-2.6.32.noarch/drivers/usb/serial/usb-wwan.h.orig 2010-04-01 12:52:26.999995712 -0400
++++ linux-2.6.32.noarch/drivers/usb/serial/usb-wwan.h 2010-04-01 12:52:26.999995712 -0400
+@@ -0,0 +1,66 @@
++/*
++ * Definitions for USB serial mobile broadband cards
++ */
++
++#ifndef __LINUX_USB_USB_WWAN
++#define __LINUX_USB_USB_WWAN
++
++extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);
++extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
++extern void usb_wwan_close(struct usb_serial_port *port);
++extern int usb_wwan_startup(struct usb_serial *serial);
++extern void usb_wwan_disconnect(struct usb_serial *serial);
++extern void usb_wwan_release(struct usb_serial *serial);
++extern int usb_wwan_write_room(struct tty_struct *tty);
++extern void usb_wwan_set_termios(struct tty_struct *tty,
++ struct usb_serial_port *port,
++ struct ktermios *old);
++extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file);
++extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
++ unsigned int set, unsigned int clear);
++extern int usb_wwan_send_setup(struct usb_serial_port *port);
++extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
++ const unsigned char *buf, int count);
++extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
++#ifdef CONFIG_PM
++extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message);
++extern int usb_wwan_resume(struct usb_serial *serial);
++#endif
++
++/* per port private data */
++
++#define N_IN_URB 4
++#define N_OUT_URB 4
++#define IN_BUFLEN 4096
++#define OUT_BUFLEN 4096
++
++struct usb_wwan_intf_private {
++ spinlock_t susp_lock;
++ unsigned int suspended:1;
++ int in_flight;
++ int (*send_setup) (struct usb_serial_port *port);
++};
++
++struct usb_wwan_port_private {
++ /* Input endpoints and buffer for this port */
++ struct urb *in_urbs[N_IN_URB];
++ u8 *in_buffer[N_IN_URB];
++ /* Output endpoints and buffer for this port */
++ struct urb *out_urbs[N_OUT_URB];
++ u8 *out_buffer[N_OUT_URB];
++ unsigned long out_busy; /* Bit vector of URBs in use */
++ int opened;
++ struct usb_anchor delayed;
++
++ /* Settings for the port */
++ int rts_state; /* Handshaking pins (outputs) */
++ int dtr_state;
++ int cts_state; /* Handshaking pins (inputs) */
++ int dsr_state;
++ int dcd_state;
++ int ri_state;
++
++ unsigned long tx_start_time[N_OUT_URB];
++};
++
++#endif /* __LINUX_USB_USB_WWAN */
diff --git a/freed-ora/current/F-12/linux-2.6-utrace-ptrace.patch b/freed-ora/current/F-12/linux-2.6-utrace-ptrace.patch
new file mode 100644
index 000000000..e95ca6ade
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-utrace-ptrace.patch
@@ -0,0 +1,1825 @@
+implement utrace-ptrace
+
+The patch adds the new file, kernel/ptrace-utrace.c, which contains
+the new implementation of ptrace over utrace.
+
+This file is not compiled until we have the CONFIG_UTRACE option, which
+will be added by the next "utrace core" patch.
+
+It's supposed to be an invisible implementation change; nothing should
+change for userland when CONFIG_UTRACE is enabled.
+
+Signed-off-by: Roland McGrath <roland@redhat.com>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+---
+ include/linux/ptrace.h | 2 +-
+ kernel/Makefile | 1 +
+ kernel/ptrace-utrace.c | 1080 ++++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/ptrace.c | 572 +++++++++++++-------------
+ kernel/utrace.c | 16 +
+ 5 files changed, 1378 insertions(+), 293 deletions(-)
+
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 4802e2a..03f8fc7 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -79,7 +79,7 @@
+ #include <linux/compiler.h> /* For unlikely. */
+ #include <linux/sched.h> /* For struct task_struct. */
+
+-
++extern void ptrace_notify_stop(struct task_struct *tracee);
+ extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
+ extern int ptrace_traceme(void);
+ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 263bb19..42cb1ec 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -69,6 +69,7 @@ obj-$(CONFIG_RESOURCE_COUNTERS) += res_c
+ obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
+ obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
+ obj-$(CONFIG_UTRACE) += utrace.o
++obj-$(CONFIG_UTRACE) += ptrace-utrace.o
+ obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
+ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
+ obj-$(CONFIG_GCOV_KERNEL) += gcov/
+diff --git a/kernel/ptrace-utrace.c b/kernel/ptrace-utrace.c
+new file mode 100644
+index ...ea419ee 100644
+--- /dev/null
++++ b/kernel/ptrace-utrace.c
+@@ -0,0 +1,1080 @@
++/*
++ * linux/kernel/ptrace.c
++ *
++ * (C) Copyright 1999 Linus Torvalds
++ *
++ * Common interfaces for "ptrace()" which we do not want
++ * to continually duplicate across every architecture.
++ */
++
++#include <linux/capability.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/smp_lock.h>
++#include <linux/ptrace.h>
++#include <linux/utrace.h>
++#include <linux/security.h>
++#include <linux/signal.h>
++#include <linux/audit.h>
++#include <linux/pid_namespace.h>
++#include <linux/syscalls.h>
++#include <linux/uaccess.h>
++
++/*
++ * ptrace a task: make the debugger its new parent and
++ * move it to the ptrace list.
++ *
++ * Must be called with the tasklist lock write-held.
++ */
++void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
++{
++ BUG_ON(!list_empty(&child->ptrace_entry));
++ list_add(&child->ptrace_entry, &new_parent->ptraced);
++ child->parent = new_parent;
++}
++
++/*
++ * unptrace a task: move it back to its original parent and
++ * remove it from the ptrace list.
++ *
++ * Must be called with the tasklist lock write-held.
++ */
++void __ptrace_unlink(struct task_struct *child)
++{
++ BUG_ON(!child->ptrace);
++
++ child->ptrace = 0;
++ child->parent = child->real_parent;
++ list_del_init(&child->ptrace_entry);
++
++ arch_ptrace_untrace(child);
++}
++
++struct ptrace_context {
++ int options;
++
++ int signr;
++ siginfo_t *siginfo;
++
++ int stop_code;
++ unsigned long eventmsg;
++
++ enum utrace_resume_action resume;
++};
++
++#define PT_UTRACED 0x00001000
++
++#define PTRACE_O_SYSEMU 0x100
++
++#define PTRACE_EVENT_SYSCALL (1 << 16)
++#define PTRACE_EVENT_SIGTRAP (2 << 16)
++#define PTRACE_EVENT_SIGNAL (3 << 16)
++/* events visible to user-space */
++#define PTRACE_EVENT_MASK 0xFFFF
++
++static inline bool ptrace_event_pending(struct ptrace_context *ctx)
++{
++ return ctx->stop_code != 0;
++}
++
++static inline int get_stop_event(struct ptrace_context *ctx)
++{
++ return ctx->stop_code >> 8;
++}
++
++static inline void set_stop_code(struct ptrace_context *ctx, int event)
++{
++ ctx->stop_code = (event << 8) | SIGTRAP;
++}
++
++static inline struct ptrace_context *
++ptrace_context(struct utrace_engine *engine)
++{
++ return engine->data;
++}
++
++static const struct utrace_engine_ops ptrace_utrace_ops; /* forward decl */
++
++static struct utrace_engine *ptrace_lookup_engine(struct task_struct *tracee)
++{
++ return utrace_attach_task(tracee, UTRACE_ATTACH_MATCH_OPS,
++ &ptrace_utrace_ops, NULL);
++}
++
++static struct utrace_engine *
++ptrace_reuse_engine(struct task_struct *tracee)
++{
++ struct utrace_engine *engine;
++ struct ptrace_context *ctx;
++ int err = -EPERM;
++
++ engine = ptrace_lookup_engine(tracee);
++ if (IS_ERR(engine))
++ return engine;
++
++ ctx = ptrace_context(engine);
++ if (unlikely(ctx->resume == UTRACE_DETACH)) {
++ /*
++ * Try to reuse this self-detaching engine.
++ * The only caller which can hit this case is ptrace_attach(),
++ * it holds ->cred_guard_mutex.
++ */
++ ctx->options = 0;
++ ctx->eventmsg = 0;
++
++ /* make sure we don't get unwanted reports */
++ err = utrace_set_events(tracee, engine, UTRACE_EVENT(QUIESCE));
++ if (!err || err == -EINPROGRESS) {
++ ctx->resume = UTRACE_RESUME;
++ /* synchronize with ptrace_report_signal() */
++ err = utrace_barrier(tracee, engine);
++ }
++ WARN_ON(!err != (engine->ops == &ptrace_utrace_ops));
++
++ if (!err)
++ return engine;
++ }
++
++ utrace_engine_put(engine);
++ return ERR_PTR(err);
++}
++
++static struct utrace_engine *
++ptrace_attach_engine(struct task_struct *tracee)
++{
++ struct utrace_engine *engine;
++ struct ptrace_context *ctx;
++
++ if (unlikely(task_utrace_flags(tracee))) {
++ engine = ptrace_reuse_engine(tracee);
++ if (!IS_ERR(engine) || IS_ERR(engine) == -EPERM)
++ return engine;
++ }
++
++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++ if (unlikely(!ctx))
++ return ERR_PTR(-ENOMEM);
++
++ ctx->resume = UTRACE_RESUME;
++
++ engine = utrace_attach_task(tracee, UTRACE_ATTACH_CREATE |
++ UTRACE_ATTACH_EXCLUSIVE |
++ UTRACE_ATTACH_MATCH_OPS,
++ &ptrace_utrace_ops, ctx);
++ if (unlikely(IS_ERR(engine))) {
++ if (engine != ERR_PTR(-ESRCH) &&
++ engine != ERR_PTR(-ERESTARTNOINTR))
++ engine = ERR_PTR(-EPERM);
++ kfree(ctx);
++ }
++
++ return engine;
++}
++
++static inline int ptrace_set_events(struct task_struct *target,
++ struct utrace_engine *engine,
++ unsigned long options)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++ /*
++ * We need QUIESCE for resume handling, CLONE to check
++ * for CLONE_PTRACE, other events are always reported.
++ */
++ unsigned long events = UTRACE_EVENT(QUIESCE) | UTRACE_EVENT(CLONE) |
++ UTRACE_EVENT(EXEC) | UTRACE_EVENT_SIGNAL_ALL;
++
++ ctx->options = options;
++ if (options & PTRACE_O_TRACEEXIT)
++ events |= UTRACE_EVENT(EXIT);
++
++ return utrace_set_events(target, engine, events);
++}
++
++/*
++ * Attach a utrace engine for ptrace and set up its event mask.
++ * Returns error code or 0 on success.
++ */
++static int ptrace_attach_task(struct task_struct *tracee, int options)
++{
++ struct utrace_engine *engine;
++ int err;
++
++ engine = ptrace_attach_engine(tracee);
++ if (IS_ERR(engine))
++ return PTR_ERR(engine);
++ /*
++ * It can fail only if the tracee is dead, the caller
++ * must notice this before setting PT_UTRACED.
++ */
++ err = ptrace_set_events(tracee, engine, options);
++ WARN_ON(err && !tracee->exit_state);
++ utrace_engine_put(engine);
++ return 0;
++}
++
++static int ptrace_wake_up(struct task_struct *tracee,
++ struct utrace_engine *engine,
++ enum utrace_resume_action action,
++ bool force_wakeup)
++{
++ if (force_wakeup) {
++ unsigned long flags;
++ /*
++ * Preserve the compatibility bug. Historically ptrace
++ * wakes up the tracee even if it should not. Clear
++ * SIGNAL_STOP_STOPPED for utrace_wakeup().
++ */
++ if (lock_task_sighand(tracee, &flags)) {
++ tracee->signal->flags &= ~SIGNAL_STOP_STOPPED;
++ unlock_task_sighand(tracee, &flags);
++ }
++ }
++
++ if (action != UTRACE_REPORT)
++ ptrace_context(engine)->stop_code = 0;
++
++ return utrace_control(tracee, engine, action);
++}
++
++static void ptrace_detach_task(struct task_struct *tracee, int sig)
++{
++ /*
++ * If true, the caller is PTRACE_DETACH, otherwise
++ * the tracer detaches implicitly during exit.
++ */
++ bool voluntary = (sig >= 0);
++ struct utrace_engine *engine = ptrace_lookup_engine(tracee);
++ enum utrace_resume_action action = UTRACE_DETACH;
++
++ if (unlikely(IS_ERR(engine)))
++ return;
++
++ if (sig) {
++ struct ptrace_context *ctx = ptrace_context(engine);
++
++ switch (get_stop_event(ctx)) {
++ case PTRACE_EVENT_SYSCALL:
++ if (voluntary)
++ send_sig_info(sig, SEND_SIG_PRIV, tracee);
++ break;
++
++ case PTRACE_EVENT_SIGNAL:
++ if (voluntary)
++ ctx->signr = sig;
++ ctx->resume = UTRACE_DETACH;
++ action = UTRACE_RESUME;
++ break;
++ }
++ }
++
++ ptrace_wake_up(tracee, engine, action, voluntary);
++ utrace_engine_put(engine);
++}
++
++static void ptrace_abort_attach(struct task_struct *tracee)
++{
++ ptrace_detach_task(tracee, 0);
++}
++
++static u32 ptrace_report_exit(u32 action, struct utrace_engine *engine,
++ long orig_code, long *code)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++
++ WARN_ON(ptrace_event_pending(ctx) &&
++ !signal_group_exit(current->signal));
++
++ set_stop_code(ctx, PTRACE_EVENT_EXIT);
++ ctx->eventmsg = *code;
++
++ return UTRACE_STOP;
++}
++
++static void ptrace_clone_attach(struct task_struct *child,
++ int options)
++{
++ struct task_struct *parent = current;
++ struct task_struct *tracer;
++ bool abort = true;
++
++ if (unlikely(ptrace_attach_task(child, options))) {
++ WARN_ON(1);
++ return;
++ }
++
++ write_lock_irq(&tasklist_lock);
++ tracer = parent->parent;
++ if (!(tracer->flags & PF_EXITING) && parent->ptrace) {
++ child->ptrace = parent->ptrace;
++ __ptrace_link(child, tracer);
++ abort = false;
++ }
++ write_unlock_irq(&tasklist_lock);
++ if (unlikely(abort)) {
++ ptrace_abort_attach(child);
++ return;
++ }
++
++ sigaddset(&child->pending.signal, SIGSTOP);
++ set_tsk_thread_flag(child, TIF_SIGPENDING);
++}
++
++static u32 ptrace_report_clone(u32 action, struct utrace_engine *engine,
++ unsigned long clone_flags,
++ struct task_struct *child)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++ int event = 0;
++
++ WARN_ON(ptrace_event_pending(ctx));
++
++ if (clone_flags & CLONE_UNTRACED) {
++ /* no events reported */
++ } else if (clone_flags & CLONE_VFORK) {
++ if (ctx->options & PTRACE_O_TRACEVFORK)
++ event = PTRACE_EVENT_VFORK;
++ else if (ctx->options & PTRACE_O_TRACEVFORKDONE)
++ event = PTRACE_EVENT_VFORK_DONE;
++ } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
++ if (ctx->options & PTRACE_O_TRACECLONE)
++ event = PTRACE_EVENT_CLONE;
++ } else if (ctx->options & PTRACE_O_TRACEFORK) {
++ event = PTRACE_EVENT_FORK;
++ }
++ /*
++ * Any of these reports implies auto-attaching the new child.
++ * So does CLONE_PTRACE, even with no event to report.
++ */
++ if ((event && event != PTRACE_EVENT_VFORK_DONE) ||
++ (clone_flags & CLONE_PTRACE))
++ ptrace_clone_attach(child, ctx->options);
++
++ if (!event)
++ return UTRACE_RESUME;
++
++ set_stop_code(ctx, event);
++ ctx->eventmsg = child->pid;
++ /*
++ * We shouldn't stop now, inside the do_fork() path.
++ * We will stop later, before return to user-mode.
++ */
++ if (event == PTRACE_EVENT_VFORK_DONE)
++ return UTRACE_REPORT;
++ else
++ return UTRACE_STOP;
++}
++
++static inline void set_syscall_code(struct ptrace_context *ctx)
++{
++ set_stop_code(ctx, PTRACE_EVENT_SYSCALL);
++ if (ctx->options & PTRACE_O_TRACESYSGOOD)
++ ctx->stop_code |= 0x80;
++}
++
++static u32 ptrace_report_syscall_entry(u32 action, struct utrace_engine *engine,
++ struct pt_regs *regs)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++
++ if (action & UTRACE_SYSCALL_RESUMED) {
++ /*
++ * We already reported the first time.
++ * Nothing more to do now.
++ */
++ if (unlikely(ctx->options & PTRACE_O_SYSEMU))
++ return UTRACE_SYSCALL_ABORT | UTRACE_REPORT;
++ return utrace_syscall_action(action) | UTRACE_RESUME;
++ }
++
++ WARN_ON(ptrace_event_pending(ctx));
++
++ set_syscall_code(ctx);
++
++ if (unlikely(ctx->options & PTRACE_O_SYSEMU))
++ return UTRACE_SYSCALL_ABORT | UTRACE_REPORT;
++ /*
++ * Stop now to report. We will get another callback after
++ * we resume, with the UTRACE_SYSCALL_RESUMED flag set.
++ */
++ return UTRACE_SYSCALL_RUN | UTRACE_STOP;
++}
++
++static u32 ptrace_report_syscall_exit(u32 action, struct utrace_engine *engine,
++ struct pt_regs *regs)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++
++ if (ptrace_event_pending(ctx))
++ return UTRACE_STOP;
++
++ if (ctx->resume != UTRACE_RESUME) {
++ WARN_ON(ctx->resume != UTRACE_BLOCKSTEP &&
++ ctx->resume != UTRACE_SINGLESTEP);
++ ctx->resume = UTRACE_RESUME;
++
++ ctx->signr = SIGTRAP;
++ return UTRACE_INTERRUPT;
++ }
++
++ set_syscall_code(ctx);
++ return UTRACE_STOP;
++}
++
++static u32 ptrace_report_exec(u32 action, struct utrace_engine *engine,
++ const struct linux_binfmt *fmt,
++ const struct linux_binprm *bprm,
++ struct pt_regs *regs)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++
++ WARN_ON(ptrace_event_pending(ctx));
++
++ if (!(ctx->options & PTRACE_O_TRACEEXEC)) {
++ /*
++ * Old-fashioned ptrace'd exec just posts a plain signal.
++ */
++ send_sig(SIGTRAP, current, 0);
++ return UTRACE_RESUME;
++ }
++
++ set_stop_code(ctx, PTRACE_EVENT_EXEC);
++ return UTRACE_STOP;
++}
++
++static enum utrace_signal_action resume_signal(struct ptrace_context *ctx,
++ struct k_sigaction *return_ka)
++{
++ siginfo_t *info = ctx->siginfo;
++ int signr = ctx->signr;
++
++ ctx->siginfo = NULL;
++ ctx->signr = 0;
++
++ /* Did the debugger cancel the sig? */
++ if (!signr)
++ return UTRACE_SIGNAL_IGN;
++ /*
++ * Update the siginfo structure if the signal has changed.
++ * If the debugger wanted something specific in the siginfo
++ * then it should have updated *info via PTRACE_SETSIGINFO.
++ */
++ if (info->si_signo != signr) {
++ info->si_signo = signr;
++ info->si_errno = 0;
++ info->si_code = SI_USER;
++ info->si_pid = task_pid_vnr(current->parent);
++ info->si_uid = task_uid(current->parent);
++ }
++
++ /* If the (new) signal is now blocked, requeue it. */
++ if (sigismember(&current->blocked, signr)) {
++ send_sig_info(signr, info, current);
++ return UTRACE_SIGNAL_IGN;
++ }
++
++ spin_lock_irq(&current->sighand->siglock);
++ *return_ka = current->sighand->action[signr - 1];
++ spin_unlock_irq(&current->sighand->siglock);
++
++ return UTRACE_SIGNAL_DELIVER;
++}
++
++static u32 ptrace_report_signal(u32 action, struct utrace_engine *engine,
++ struct pt_regs *regs,
++ siginfo_t *info,
++ const struct k_sigaction *orig_ka,
++ struct k_sigaction *return_ka)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++ enum utrace_resume_action resume = ctx->resume;
++
++ if (ptrace_event_pending(ctx)) {
++ action = utrace_signal_action(action);
++ WARN_ON(action != UTRACE_SIGNAL_REPORT);
++ return action | UTRACE_STOP;
++ }
++
++ switch (utrace_signal_action(action)) {
++ case UTRACE_SIGNAL_HANDLER:
++ if (WARN_ON(ctx->siginfo))
++ ctx->siginfo = NULL;
++
++ if (resume != UTRACE_RESUME) {
++ WARN_ON(resume != UTRACE_BLOCKSTEP &&
++ resume != UTRACE_SINGLESTEP);
++
++ set_stop_code(ctx, PTRACE_EVENT_SIGTRAP);
++ return UTRACE_STOP | UTRACE_SIGNAL_IGN;
++ }
++
++ case UTRACE_SIGNAL_REPORT:
++ if (!ctx->siginfo) {
++ if (ctx->signr) {
++ /* set by ptrace_resume(SYSCALL_EXIT) */
++ WARN_ON(ctx->signr != SIGTRAP);
++ user_single_step_siginfo(current, regs, info);
++ force_sig_info(SIGTRAP, info, current);
++ }
++
++ return resume | UTRACE_SIGNAL_IGN;
++ }
++
++ if (WARN_ON(ctx->siginfo != info))
++ return resume | UTRACE_SIGNAL_IGN;
++
++ return resume | resume_signal(ctx, return_ka);
++
++ default:
++ break;
++ }
++
++ WARN_ON(ctx->siginfo);
++ ctx->siginfo = info;
++ /*
++ * ctx->siginfo points to the caller's stack.
++ * Make sure the subsequent UTRACE_SIGNAL_REPORT clears
++ * ->siginfo before return from get_signal_to_deliver().
++ */
++ if (utrace_control(current, engine, UTRACE_INTERRUPT))
++ WARN_ON(1);
++
++ ctx->signr = info->si_signo;
++ ctx->stop_code = (PTRACE_EVENT_SIGNAL << 8) | ctx->signr;
++
++ return UTRACE_STOP | UTRACE_SIGNAL_IGN;
++}
++
++static u32 ptrace_report_quiesce(u32 action, struct utrace_engine *engine,
++ unsigned long event)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++
++ if (ptrace_event_pending(ctx))
++ return UTRACE_STOP;
++
++ return event ? UTRACE_RESUME : ctx->resume;
++}
++
++static void ptrace_release(void *data)
++{
++ kfree(data);
++}
++
++static const struct utrace_engine_ops ptrace_utrace_ops = {
++ .report_signal = ptrace_report_signal,
++ .report_quiesce = ptrace_report_quiesce,
++ .report_exec = ptrace_report_exec,
++ .report_exit = ptrace_report_exit,
++ .report_clone = ptrace_report_clone,
++ .report_syscall_entry = ptrace_report_syscall_entry,
++ .report_syscall_exit = ptrace_report_syscall_exit,
++ .release = ptrace_release,
++};
++
++int ptrace_check_attach(struct task_struct *child, int kill)
++{
++ struct utrace_engine *engine;
++ struct utrace_examiner exam;
++ int ret = -ESRCH;
++
++ engine = ptrace_lookup_engine(child);
++ if (IS_ERR(engine))
++ return ret;
++
++ if (child->parent != current)
++ goto out;
++
++ if (unlikely(kill))
++ ret = 0;
++
++ if (!task_is_stopped_or_traced(child))
++ goto out;
++ /*
++ * Make sure our engine has already stopped the child.
++ * Then wait for it to be off the CPU.
++ */
++ if (!utrace_control(child, engine, UTRACE_STOP) &&
++ !utrace_prepare_examine(child, engine, &exam))
++ ret = 0;
++out:
++ utrace_engine_put(engine);
++ return ret;
++}
++
++int ptrace_attach(struct task_struct *task)
++{
++ int retval;
++
++ audit_ptrace(task);
++
++ retval = -EPERM;
++ if (unlikely(task->flags & PF_KTHREAD))
++ goto out;
++ if (same_thread_group(task, current))
++ goto out;
++
++ /*
++	 * Protect exec's credential calculations against our interference;
++	 * SUID, SGID and LSM creds get determined differently
++ * under ptrace.
++ */
++ retval = -ERESTARTNOINTR;
++ if (mutex_lock_interruptible(&task->cred_guard_mutex))
++ goto out;
++
++ task_lock(task);
++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
++ task_unlock(task);
++ if (retval)
++ goto unlock_creds;
++
++ retval = ptrace_attach_task(task, 0);
++ if (unlikely(retval))
++ goto unlock_creds;
++
++ write_lock_irq(&tasklist_lock);
++ retval = -EPERM;
++ if (unlikely(task->exit_state))
++ goto unlock_tasklist;
++
++ BUG_ON(task->ptrace);
++ task->ptrace = PT_UTRACED;
++ if (capable(CAP_SYS_PTRACE))
++ task->ptrace |= PT_PTRACE_CAP;
++
++ __ptrace_link(task, current);
++ send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
++
++ retval = 0;
++unlock_tasklist:
++ write_unlock_irq(&tasklist_lock);
++unlock_creds:
++ mutex_unlock(&task->cred_guard_mutex);
++out:
++ return retval;
++}
++
++/*
++ * Performs checks and sets PT_UTRACED.
++ * Should be used by all ptrace implementations for PTRACE_TRACEME.
++ */
++int ptrace_traceme(void)
++{
++ bool detach = true;
++ int ret = ptrace_attach_task(current, 0);
++
++ if (unlikely(ret))
++ return ret;
++
++ ret = -EPERM;
++ write_lock_irq(&tasklist_lock);
++ BUG_ON(current->ptrace);
++ ret = security_ptrace_traceme(current->parent);
++ /*
++ * Check PF_EXITING to ensure ->real_parent has not passed
++ * exit_ptrace(). Otherwise we don't report the error but
++ * pretend ->real_parent untraces us right after return.
++ */
++ if (!ret && !(current->real_parent->flags & PF_EXITING)) {
++ current->ptrace = PT_UTRACED;
++ __ptrace_link(current, current->real_parent);
++ detach = false;
++ }
++ write_unlock_irq(&tasklist_lock);
++
++ if (detach)
++ ptrace_abort_attach(current);
++ return ret;
++}
++
++static void ptrace_do_detach(struct task_struct *tracee, unsigned int data)
++{
++ bool detach, release;
++
++ write_lock_irq(&tasklist_lock);
++ /*
++ * This tracee can be already killed. Make sure de_thread() or
++ * our sub-thread doing do_wait() didn't do release_task() yet.
++ */
++ detach = tracee->ptrace != 0;
++ release = false;
++ if (likely(detach))
++ release = __ptrace_detach(current, tracee);
++ write_unlock_irq(&tasklist_lock);
++
++ if (unlikely(release))
++ release_task(tracee);
++ else if (likely(detach))
++ ptrace_detach_task(tracee, data);
++}
++
++int ptrace_detach(struct task_struct *child, unsigned int data)
++{
++ if (!valid_signal(data))
++ return -EIO;
++
++ ptrace_do_detach(child, data);
++
++ return 0;
++}
++
++/*
++ * Detach all tasks we were using ptrace on.
++ */
++void exit_ptrace(struct task_struct *tracer)
++{
++ for (;;) {
++ struct task_struct *tracee = NULL;
++
++ read_lock(&tasklist_lock);
++ if (!list_empty(&tracer->ptraced)) {
++ tracee = list_first_entry(&tracer->ptraced,
++ struct task_struct, ptrace_entry);
++ get_task_struct(tracee);
++ }
++ read_unlock(&tasklist_lock);
++ if (!tracee)
++ break;
++
++ ptrace_do_detach(tracee, -1);
++ put_task_struct(tracee);
++ }
++}
++
++static int ptrace_set_options(struct task_struct *tracee,
++ struct utrace_engine *engine, long data)
++{
++ BUILD_BUG_ON(PTRACE_O_MASK & PTRACE_O_SYSEMU);
++
++ ptrace_set_events(tracee, engine, data & PTRACE_O_MASK);
++ return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
++}
++
++static int ptrace_rw_siginfo(struct task_struct *tracee,
++ struct ptrace_context *ctx,
++ siginfo_t *info, bool write)
++{
++ unsigned long flags;
++ int err;
++
++ switch (get_stop_event(ctx)) {
++ case 0: /* jctl stop */
++ return -EINVAL;
++
++ case PTRACE_EVENT_SIGNAL:
++ err = -ESRCH;
++ if (lock_task_sighand(tracee, &flags)) {
++ if (likely(task_is_traced(tracee))) {
++ if (write)
++ *ctx->siginfo = *info;
++ else
++ *info = *ctx->siginfo;
++ err = 0;
++ }
++ unlock_task_sighand(tracee, &flags);
++ }
++
++ return err;
++
++ default:
++ if (!write) {
++ memset(info, 0, sizeof(*info));
++ info->si_signo = SIGTRAP;
++ info->si_code = ctx->stop_code & PTRACE_EVENT_MASK;
++ info->si_pid = task_pid_vnr(tracee);
++ info->si_uid = task_uid(tracee);
++ }
++
++ return 0;
++ }
++}
++
++static void do_ptrace_notify_stop(struct ptrace_context *ctx,
++ struct task_struct *tracee)
++{
++ /*
++ * This can race with SIGKILL, but we borrow this race from
++ * the old ptrace implementation. ->exit_code is only needed
++ * for wait_task_stopped()->task_stopped_code(), we should
++ * change it to use ptrace_context.
++ */
++ tracee->exit_code = ctx->stop_code & PTRACE_EVENT_MASK;
++ WARN_ON(!tracee->exit_code);
++
++ read_lock(&tasklist_lock);
++ /*
++ * Don't want to allow preemption here, because
++ * sys_ptrace() needs this task to be inactive.
++ */
++ preempt_disable();
++ /*
++ * It can be killed and then released by our subthread,
++ * or ptrace_attach() has not completed yet.
++ */
++ if (task_ptrace(tracee))
++ do_notify_parent_cldstop(tracee, CLD_TRAPPED);
++ read_unlock(&tasklist_lock);
++ preempt_enable_no_resched();
++}
++
++void ptrace_notify_stop(struct task_struct *tracee)
++{
++ struct utrace_engine *engine = ptrace_lookup_engine(tracee);
++
++ if (IS_ERR(engine))
++ return;
++
++ do_ptrace_notify_stop(ptrace_context(engine), tracee);
++ utrace_engine_put(engine);
++}
++
++static int ptrace_resume_action(struct task_struct *tracee,
++ struct utrace_engine *engine, long request)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++ unsigned long events;
++ int action;
++
++ ctx->options &= ~PTRACE_O_SYSEMU;
++ events = engine->flags & ~UTRACE_EVENT_SYSCALL;
++ action = UTRACE_RESUME;
++
++ switch (request) {
++#ifdef PTRACE_SINGLEBLOCK
++ case PTRACE_SINGLEBLOCK:
++ if (unlikely(!arch_has_block_step()))
++ return -EIO;
++ action = UTRACE_BLOCKSTEP;
++ events |= UTRACE_EVENT(SYSCALL_EXIT);
++ break;
++#endif
++
++#ifdef PTRACE_SINGLESTEP
++ case PTRACE_SINGLESTEP:
++ if (unlikely(!arch_has_single_step()))
++ return -EIO;
++ action = UTRACE_SINGLESTEP;
++ events |= UTRACE_EVENT(SYSCALL_EXIT);
++ break;
++#endif
++
++#ifdef PTRACE_SYSEMU
++ case PTRACE_SYSEMU_SINGLESTEP:
++ if (unlikely(!arch_has_single_step()))
++ return -EIO;
++ action = UTRACE_SINGLESTEP;
++ case PTRACE_SYSEMU:
++ ctx->options |= PTRACE_O_SYSEMU;
++ events |= UTRACE_EVENT(SYSCALL_ENTRY);
++ break;
++#endif
++
++ case PTRACE_SYSCALL:
++ events |= UTRACE_EVENT_SYSCALL;
++ break;
++
++ case PTRACE_CONT:
++ break;
++ default:
++ return -EIO;
++ }
++
++ if (events != engine->flags &&
++ utrace_set_events(tracee, engine, events))
++ return -ESRCH;
++
++ return action;
++}
++
++static int ptrace_resume(struct task_struct *tracee,
++ struct utrace_engine *engine,
++ long request, long data)
++{
++ struct ptrace_context *ctx = ptrace_context(engine);
++ int action;
++
++ if (!valid_signal(data))
++ return -EIO;
++
++ action = ptrace_resume_action(tracee, engine, request);
++ if (action < 0)
++ return action;
++
++ switch (get_stop_event(ctx)) {
++ case PTRACE_EVENT_VFORK:
++ if (ctx->options & PTRACE_O_TRACEVFORKDONE) {
++ set_stop_code(ctx, PTRACE_EVENT_VFORK_DONE);
++ action = UTRACE_REPORT;
++ }
++ break;
++
++ case PTRACE_EVENT_EXEC:
++ case PTRACE_EVENT_FORK:
++ case PTRACE_EVENT_CLONE:
++ case PTRACE_EVENT_VFORK_DONE:
++ if (request == PTRACE_SYSCALL) {
++ set_syscall_code(ctx);
++ do_ptrace_notify_stop(ctx, tracee);
++ return 0;
++ }
++
++ if (action != UTRACE_RESUME) {
++ /*
++ * single-stepping. UTRACE_SIGNAL_REPORT will
++ * synthesize a trap to follow the syscall insn.
++ */
++ ctx->signr = SIGTRAP;
++ action = UTRACE_INTERRUPT;
++ }
++ break;
++
++ case PTRACE_EVENT_SYSCALL:
++ if (data)
++ send_sig_info(data, SEND_SIG_PRIV, tracee);
++ break;
++
++ case PTRACE_EVENT_SIGNAL:
++ ctx->signr = data;
++ break;
++ }
++
++ ctx->resume = action;
++ ptrace_wake_up(tracee, engine, action, true);
++ return 0;
++}
++
++int ptrace_request(struct task_struct *child, long request,
++ long addr, long data)
++{
++ struct utrace_engine *engine = ptrace_lookup_engine(child);
++ siginfo_t siginfo;
++ int ret;
++
++ if (unlikely(IS_ERR(engine)))
++ return -ESRCH;
++
++ switch (request) {
++ case PTRACE_PEEKTEXT:
++ case PTRACE_PEEKDATA:
++ ret = generic_ptrace_peekdata(child, addr, data);
++ break;
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ ret = generic_ptrace_pokedata(child, addr, data);
++ break;
++
++#ifdef PTRACE_OLDSETOPTIONS
++ case PTRACE_OLDSETOPTIONS:
++#endif
++ case PTRACE_SETOPTIONS:
++ ret = ptrace_set_options(child, engine, data);
++ break;
++ case PTRACE_GETEVENTMSG:
++ ret = put_user(ptrace_context(engine)->eventmsg,
++ (unsigned long __user *) data);
++ break;
++
++ case PTRACE_GETSIGINFO:
++ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
++ &siginfo, false);
++ if (!ret)
++ ret = copy_siginfo_to_user((siginfo_t __user *) data,
++ &siginfo);
++ break;
++
++ case PTRACE_SETSIGINFO:
++ if (copy_from_user(&siginfo, (siginfo_t __user *) data,
++ sizeof siginfo))
++ ret = -EFAULT;
++ else
++ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
++ &siginfo, true);
++ break;
++
++ case PTRACE_DETACH: /* detach a process that was attached. */
++ ret = ptrace_detach(child, data);
++ break;
++
++ case PTRACE_KILL:
++ /* Ugly historical behaviour. */
++ if (task_is_traced(child))
++ ptrace_resume(child, engine, PTRACE_CONT, SIGKILL);
++ ret = 0;
++ break;
++
++ default:
++ ret = ptrace_resume(child, engine, request, data);
++ break;
++ }
++
++ utrace_engine_put(engine);
++ return ret;
++}
++
++#if defined CONFIG_COMPAT
++#include <linux/compat.h>
++
++int compat_ptrace_request(struct task_struct *child, compat_long_t request,
++ compat_ulong_t addr, compat_ulong_t data)
++{
++ struct utrace_engine *engine = ptrace_lookup_engine(child);
++ compat_ulong_t __user *datap = compat_ptr(data);
++ compat_ulong_t word;
++ siginfo_t siginfo;
++ int ret;
++
++ if (unlikely(IS_ERR(engine)))
++ return -ESRCH;
++
++ switch (request) {
++ case PTRACE_PEEKTEXT:
++ case PTRACE_PEEKDATA:
++ ret = access_process_vm(child, addr, &word, sizeof(word), 0);
++ if (ret != sizeof(word))
++ ret = -EIO;
++ else
++ ret = put_user(word, datap);
++ break;
++
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ ret = access_process_vm(child, addr, &data, sizeof(data), 1);
++ ret = (ret != sizeof(data) ? -EIO : 0);
++ break;
++
++ case PTRACE_GETEVENTMSG:
++ ret = put_user((compat_ulong_t)ptrace_context(engine)->eventmsg,
++ datap);
++ break;
++
++ case PTRACE_GETSIGINFO:
++ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
++ &siginfo, false);
++ if (!ret)
++ ret = copy_siginfo_to_user32(
++ (struct compat_siginfo __user *) datap,
++ &siginfo);
++ break;
++
++ case PTRACE_SETSIGINFO:
++ memset(&siginfo, 0, sizeof siginfo);
++ if (copy_siginfo_from_user32(
++ &siginfo, (struct compat_siginfo __user *) datap))
++ ret = -EFAULT;
++ else
++ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
++ &siginfo, true);
++ break;
++
++ default:
++ ret = ptrace_request(child, request, addr, data);
++ }
++
++ utrace_engine_put(engine);
++ return ret;
++}
++#endif /* CONFIG_COMPAT */
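Illustrative aside (not part of the patch): the handlers above service the ordinary ptrace(2) flow from userland. A minimal tracer exercising only requests dispatched here might look like the following sketch; error handling is abbreviated.

    /* Sketch only: attach, set an option, resume with syscall tracing, detach. */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    static int trace_pid(pid_t pid)
    {
    	int status;

    	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
    		return -1;			/* ptrace_attach() failed */
    	if (waitpid(pid, &status, 0) == -1)	/* wait for the attach stop */
    		return -1;

    	/* Serviced by ptrace_set_options() above. */
    	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *) PTRACE_O_TRACEFORK);

    	/* Serviced by ptrace_resume(); the tracee stops again at syscall entry/exit. */
    	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
    	waitpid(pid, &status, 0);

    	/* Serviced by ptrace_detach(); data 0 means "no signal on resume". */
    	return ptrace(PTRACE_DETACH, pid, NULL, NULL) == -1 ? -1 : 0;
    }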
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index a408bf7..4e87441 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -16,7 +16,6 @@
+ #include <linux/pagemap.h>
+ #include <linux/smp_lock.h>
+ #include <linux/ptrace.h>
+-#include <linux/utrace.h>
+ #include <linux/security.h>
+ #include <linux/signal.h>
+ #include <linux/audit.h>
+@@ -24,7 +23,286 @@
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+
++int __ptrace_may_access(struct task_struct *task, unsigned int mode)
++{
++ const struct cred *cred = current_cred(), *tcred;
++
++ /* May we inspect the given task?
++ * This check is used both for attaching with ptrace
++ * and for allowing access to sensitive information in /proc.
++ *
++ * ptrace_attach denies several cases that /proc allows
++ * because setting up the necessary parent/child relationship
++ * or halting the specified task is impossible.
++ */
++ int dumpable = 0;
++ /* Don't let security modules deny introspection */
++ if (task == current)
++ return 0;
++ rcu_read_lock();
++ tcred = __task_cred(task);
++ if ((cred->uid != tcred->euid ||
++ cred->uid != tcred->suid ||
++ cred->uid != tcred->uid ||
++ cred->gid != tcred->egid ||
++ cred->gid != tcred->sgid ||
++ cred->gid != tcred->gid) &&
++ !capable(CAP_SYS_PTRACE)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++ smp_rmb();
++ if (task->mm)
++ dumpable = get_dumpable(task->mm);
++ if (!dumpable && !capable(CAP_SYS_PTRACE))
++ return -EPERM;
++
++ return security_ptrace_access_check(task, mode);
++}
++
++bool ptrace_may_access(struct task_struct *task, unsigned int mode)
++{
++ int err;
++ task_lock(task);
++ err = __ptrace_may_access(task, mode);
++ task_unlock(task);
++ return !err;
++}
++
++/*
++ * Called with irqs disabled, returns true if children should reap themselves.
++ */
++static int ignoring_children(struct sighand_struct *sigh)
++{
++ int ret;
++ spin_lock(&sigh->siglock);
++ ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
++ (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
++ spin_unlock(&sigh->siglock);
++ return ret;
++}
++
++/*
++ * Called with tasklist_lock held for writing.
++ * Unlink a traced task, and clean it up if it was a traced zombie.
++ * Return true if it needs to be reaped with release_task().
++ * (We can't call release_task() here because we already hold tasklist_lock.)
++ *
++ * If it's a zombie, our attachedness prevented normal parent notification
++ * or self-reaping. Do notification now if it would have happened earlier.
++ * If it should reap itself, return true.
++ *
++ * If it's our own child, there is no notification to do. But if our normal
++ * children self-reap, then this child was prevented by ptrace and we must
++ * reap it now, in that case we must also wake up sub-threads sleeping in
++ * do_wait().
++ */
++bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
++{
++ __ptrace_unlink(p);
++
++ if (p->exit_state == EXIT_ZOMBIE) {
++ if (!task_detached(p) && thread_group_empty(p)) {
++ if (!same_thread_group(p->real_parent, tracer))
++ do_notify_parent(p, p->exit_signal);
++ else if (ignoring_children(tracer->sighand)) {
++ __wake_up_parent(p, tracer);
++ p->exit_signal = -1;
++ }
++ }
++ if (task_detached(p)) {
++ /* Mark it as in the process of being reaped. */
++ p->exit_state = EXIT_DEAD;
++ return true;
++ }
++ }
++
++ return false;
++}
++
++int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
++{
++ int copied = 0;
++
++ while (len > 0) {
++ char buf[128];
++ int this_len, retval;
++
++ this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
++ retval = access_process_vm(tsk, src, buf, this_len, 0);
++ if (!retval) {
++ if (copied)
++ break;
++ return -EIO;
++ }
++ if (copy_to_user(dst, buf, retval))
++ return -EFAULT;
++ copied += retval;
++ src += retval;
++ dst += retval;
++ len -= retval;
++ }
++ return copied;
++}
++
++int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
++{
++ int copied = 0;
++
++ while (len > 0) {
++ char buf[128];
++ int this_len, retval;
++
++ this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
++ if (copy_from_user(buf, src, this_len))
++ return -EFAULT;
++ retval = access_process_vm(tsk, dst, buf, this_len, 1);
++ if (!retval) {
++ if (copied)
++ break;
++ return -EIO;
++ }
++ copied += retval;
++ src += retval;
++ dst += retval;
++ len -= retval;
++ }
++ return copied;
++}
++
++static struct task_struct *ptrace_get_task_struct(pid_t pid)
++{
++ struct task_struct *child;
++
++ rcu_read_lock();
++ child = find_task_by_vpid(pid);
++ if (child)
++ get_task_struct(child);
++ rcu_read_unlock();
++
++ if (!child)
++ return ERR_PTR(-ESRCH);
++ return child;
++}
++
++#ifndef arch_ptrace_attach
++#define arch_ptrace_attach(child) do { } while (0)
++#endif
+
++SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
++{
++ struct task_struct *child;
++ long ret;
++
++ /*
++ * This lock_kernel fixes a subtle race with suid exec
++ */
++ lock_kernel();
++ if (request == PTRACE_TRACEME) {
++ ret = ptrace_traceme();
++ if (!ret)
++ arch_ptrace_attach(current);
++ goto out;
++ }
++
++ child = ptrace_get_task_struct(pid);
++ if (IS_ERR(child)) {
++ ret = PTR_ERR(child);
++ goto out;
++ }
++
++ if (request == PTRACE_ATTACH) {
++ ret = ptrace_attach(child);
++ /*
++ * Some architectures need to do book-keeping after
++ * a ptrace attach.
++ */
++ if (!ret)
++ arch_ptrace_attach(child);
++ goto out_put_task_struct;
++ }
++
++ ret = ptrace_check_attach(child, request == PTRACE_KILL);
++ if (ret < 0)
++ goto out_put_task_struct;
++
++ ret = arch_ptrace(child, request, addr, data);
++
++ out_put_task_struct:
++ put_task_struct(child);
++ out:
++ unlock_kernel();
++ return ret;
++}
++
++int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
++{
++ unsigned long tmp;
++ int copied;
++
++ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
++ if (copied != sizeof(tmp))
++ return -EIO;
++ return put_user(tmp, (unsigned long __user *)data);
++}
++
++int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
++{
++ int copied;
++
++ copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
++ return (copied == sizeof(data)) ? 0 : -EIO;
++}
++
++#if defined CONFIG_COMPAT
++#include <linux/compat.h>
++
++asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
++ compat_long_t addr, compat_long_t data)
++{
++ struct task_struct *child;
++ long ret;
++
++ /*
++ * This lock_kernel fixes a subtle race with suid exec
++ */
++ lock_kernel();
++ if (request == PTRACE_TRACEME) {
++ ret = ptrace_traceme();
++ goto out;
++ }
++
++ child = ptrace_get_task_struct(pid);
++ if (IS_ERR(child)) {
++ ret = PTR_ERR(child);
++ goto out;
++ }
++
++ if (request == PTRACE_ATTACH) {
++ ret = ptrace_attach(child);
++ /*
++ * Some architectures need to do book-keeping after
++ * a ptrace attach.
++ */
++ if (!ret)
++ arch_ptrace_attach(child);
++ goto out_put_task_struct;
++ }
++
++ ret = ptrace_check_attach(child, request == PTRACE_KILL);
++ if (!ret)
++ ret = compat_arch_ptrace(child, request, addr, data);
++
++ out_put_task_struct:
++ put_task_struct(child);
++ out:
++ unlock_kernel();
++ return ret;
++}
++#endif /* CONFIG_COMPAT */
++
++#ifndef CONFIG_UTRACE
+ /*
+ * ptrace a task: make the debugger its new parent and
+ * move it to the ptrace list.
+@@ -101,76 +379,21 @@ int ptrace_check_attach(struct task_stru
+ /*
+ * child->sighand can't be NULL, release_task()
+ * does ptrace_unlink() before __exit_signal().
+- */
+- spin_lock_irq(&child->sighand->siglock);
+- if (task_is_stopped(child))
+- child->state = TASK_TRACED;
+- else if (!task_is_traced(child) && !kill)
+- ret = -ESRCH;
+- spin_unlock_irq(&child->sighand->siglock);
+- }
+- read_unlock(&tasklist_lock);
+-
+- if (!ret && !kill)
+- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+-
+- /* All systems go.. */
+- return ret;
+-}
+-
+-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+-{
+- const struct cred *cred = current_cred(), *tcred;
+-
+- /* May we inspect the given task?
+- * This check is used both for attaching with ptrace
+- * and for allowing access to sensitive information in /proc.
+- *
+- * ptrace_attach denies several cases that /proc allows
+- * because setting up the necessary parent/child relationship
+- * or halting the specified task is impossible.
+- */
+- int dumpable = 0;
+- /* Don't let security modules deny introspection */
+- if (task == current)
+- return 0;
+- rcu_read_lock();
+- tcred = __task_cred(task);
+- if ((cred->uid != tcred->euid ||
+- cred->uid != tcred->suid ||
+- cred->uid != tcred->uid ||
+- cred->gid != tcred->egid ||
+- cred->gid != tcred->sgid ||
+- cred->gid != tcred->gid) &&
+- !capable(CAP_SYS_PTRACE)) {
+- rcu_read_unlock();
+- return -EPERM;
+- }
+- rcu_read_unlock();
+- smp_rmb();
+- if (task->mm)
+- dumpable = get_dumpable(task->mm);
+- if (!dumpable && !capable(CAP_SYS_PTRACE))
+- return -EPERM;
+-
+- return security_ptrace_access_check(task, mode);
+-}
++ */
++ spin_lock_irq(&child->sighand->siglock);
++ if (task_is_stopped(child))
++ child->state = TASK_TRACED;
++ else if (!task_is_traced(child) && !kill)
++ ret = -ESRCH;
++ spin_unlock_irq(&child->sighand->siglock);
++ }
++ read_unlock(&tasklist_lock);
+
+-bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+-{
+- int err;
+- task_lock(task);
+- err = __ptrace_may_access(task, mode);
+- task_unlock(task);
+- return !err;
+-}
++ if (!ret && !kill)
++ ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+
+-/*
+- * For experimental use of utrace, exclude ptrace on the same task.
+- */
+-static inline bool exclude_ptrace(struct task_struct *task)
+-{
+- return unlikely(!!task_utrace_flags(task));
++ /* All systems go.. */
++ return ret;
+ }
+
+ int ptrace_attach(struct task_struct *task)
+@@ -196,8 +419,6 @@ int ptrace_attach(struct task_struct *ta
+
+ task_lock(task);
+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
+- if (!retval && exclude_ptrace(task))
+- retval = -EBUSY;
+ task_unlock(task);
+ if (retval)
+ goto unlock_creds;
+@@ -235,9 +456,6 @@ int ptrace_traceme(void)
+ {
+ int ret = -EPERM;
+
+- if (exclude_ptrace(current)) /* XXX locking */
+- return -EBUSY;
+-
+ write_lock_irq(&tasklist_lock);
+ /* Are we already being traced? */
+ if (!current->ptrace) {
+@@ -257,57 +475,6 @@ int ptrace_traceme(void)
+ return ret;
+ }
+
+-/*
+- * Called with irqs disabled, returns true if childs should reap themselves.
+- */
+-static int ignoring_children(struct sighand_struct *sigh)
+-{
+- int ret;
+- spin_lock(&sigh->siglock);
+- ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+- (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
+- spin_unlock(&sigh->siglock);
+- return ret;
+-}
+-
+-/*
+- * Called with tasklist_lock held for writing.
+- * Unlink a traced task, and clean it up if it was a traced zombie.
+- * Return true if it needs to be reaped with release_task().
+- * (We can't call release_task() here because we already hold tasklist_lock.)
+- *
+- * If it's a zombie, our attachedness prevented normal parent notification
+- * or self-reaping. Do notification now if it would have happened earlier.
+- * If it should reap itself, return true.
+- *
+- * If it's our own child, there is no notification to do. But if our normal
+- * children self-reap, then this child was prevented by ptrace and we must
+- * reap it now, in that case we must also wake up sub-threads sleeping in
+- * do_wait().
+- */
+-bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+-{
+- __ptrace_unlink(p);
+-
+- if (p->exit_state == EXIT_ZOMBIE) {
+- if (!task_detached(p) && thread_group_empty(p)) {
+- if (!same_thread_group(p->real_parent, tracer))
+- do_notify_parent(p, p->exit_signal);
+- else if (ignoring_children(tracer->sighand)) {
+- __wake_up_parent(p, tracer);
+- p->exit_signal = -1;
+- }
+- }
+- if (task_detached(p)) {
+- /* Mark it as in the process of being reaped. */
+- p->exit_state = EXIT_DEAD;
+- return true;
+- }
+- }
+-
+- return false;
+-}
+-
+ int ptrace_detach(struct task_struct *child, unsigned int data)
+ {
+ bool dead = false;
+@@ -361,56 +528,6 @@ void exit_ptrace(struct task_struct *tra
+ }
+ }
+
+-int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
+-{
+- int copied = 0;
+-
+- while (len > 0) {
+- char buf[128];
+- int this_len, retval;
+-
+- this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
+- retval = access_process_vm(tsk, src, buf, this_len, 0);
+- if (!retval) {
+- if (copied)
+- break;
+- return -EIO;
+- }
+- if (copy_to_user(dst, buf, retval))
+- return -EFAULT;
+- copied += retval;
+- src += retval;
+- dst += retval;
+- len -= retval;
+- }
+- return copied;
+-}
+-
+-int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
+-{
+- int copied = 0;
+-
+- while (len > 0) {
+- char buf[128];
+- int this_len, retval;
+-
+- this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
+- if (copy_from_user(buf, src, this_len))
+- return -EFAULT;
+- retval = access_process_vm(tsk, dst, buf, this_len, 1);
+- if (!retval) {
+- if (copied)
+- break;
+- return -EIO;
+- }
+- copied += retval;
+- src += retval;
+- dst += retval;
+- len -= retval;
+- }
+- return copied;
+-}
+-
+ static int ptrace_setoptions(struct task_struct *child, long data)
+ {
+ child->ptrace &= ~PT_TRACE_MASK;
+@@ -594,93 +710,7 @@ int ptrace_request(struct task_struct *c
+ return ret;
+ }
+
+-static struct task_struct *ptrace_get_task_struct(pid_t pid)
+-{
+- struct task_struct *child;
+-
+- rcu_read_lock();
+- child = find_task_by_vpid(pid);
+- if (child)
+- get_task_struct(child);
+- rcu_read_unlock();
+-
+- if (!child)
+- return ERR_PTR(-ESRCH);
+- return child;
+-}
+-
+-#ifndef arch_ptrace_attach
+-#define arch_ptrace_attach(child) do { } while (0)
+-#endif
+-
+-SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
+-{
+- struct task_struct *child;
+- long ret;
+-
+- /*
+- * This lock_kernel fixes a subtle race with suid exec
+- */
+- lock_kernel();
+- if (request == PTRACE_TRACEME) {
+- ret = ptrace_traceme();
+- if (!ret)
+- arch_ptrace_attach(current);
+- goto out;
+- }
+-
+- child = ptrace_get_task_struct(pid);
+- if (IS_ERR(child)) {
+- ret = PTR_ERR(child);
+- goto out;
+- }
+-
+- if (request == PTRACE_ATTACH) {
+- ret = ptrace_attach(child);
+- /*
+- * Some architectures need to do book-keeping after
+- * a ptrace attach.
+- */
+- if (!ret)
+- arch_ptrace_attach(child);
+- goto out_put_task_struct;
+- }
+-
+- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+- if (ret < 0)
+- goto out_put_task_struct;
+-
+- ret = arch_ptrace(child, request, addr, data);
+-
+- out_put_task_struct:
+- put_task_struct(child);
+- out:
+- unlock_kernel();
+- return ret;
+-}
+-
+-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
+-{
+- unsigned long tmp;
+- int copied;
+-
+- copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+- if (copied != sizeof(tmp))
+- return -EIO;
+- return put_user(tmp, (unsigned long __user *)data);
+-}
+-
+-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
+-{
+- int copied;
+-
+- copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+- return (copied == sizeof(data)) ? 0 : -EIO;
+-}
+-
+ #if defined CONFIG_COMPAT
+-#include <linux/compat.h>
+-
+ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data)
+ {
+@@ -732,47 +762,5 @@ int compat_ptrace_request(struct task_st
+
+ return ret;
+ }
+-
+-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+- compat_long_t addr, compat_long_t data)
+-{
+- struct task_struct *child;
+- long ret;
+-
+- /*
+- * This lock_kernel fixes a subtle race with suid exec
+- */
+- lock_kernel();
+- if (request == PTRACE_TRACEME) {
+- ret = ptrace_traceme();
+- goto out;
+- }
+-
+- child = ptrace_get_task_struct(pid);
+- if (IS_ERR(child)) {
+- ret = PTR_ERR(child);
+- goto out;
+- }
+-
+- if (request == PTRACE_ATTACH) {
+- ret = ptrace_attach(child);
+- /*
+- * Some architectures need to do book-keeping after
+- * a ptrace attach.
+- */
+- if (!ret)
+- arch_ptrace_attach(child);
+- goto out_put_task_struct;
+- }
+-
+- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+- if (!ret)
+- ret = compat_arch_ptrace(child, request, addr, data);
+-
+- out_put_task_struct:
+- put_task_struct(child);
+- out:
+- unlock_kernel();
+- return ret;
+-}
+ #endif /* CONFIG_COMPAT */
++#endif /* CONFIG_UTRACE */
+diff --git a/kernel/utrace.c b/kernel/utrace.c
+index 84d965d..ead1f13 100644
+--- a/kernel/utrace.c
++++ b/kernel/utrace.c
+@@ -811,6 +811,22 @@ relock:
+ spin_unlock_irq(&task->sighand->siglock);
+ spin_unlock(&utrace->lock);
+
++ /*
++ * If ptrace is among the reasons for this stop, do its
++ * notification now. This could not just be done in
++ * ptrace's own event report callbacks because it has to
++ * be done after we are in TASK_TRACED. This makes the
++ * synchronization with ptrace_do_wait() work right.
++ *
++ * It's only because of the bad old overloading of the do_wait()
++ * logic for handling ptrace stops that we need this special case
++ * here. One day we will clean up ptrace so it does not need to
++ * work this way. New things that are designed sensibly don't need
++ * a wakeup that synchronizes with tasklist_lock and ->state, so
++ * the proper utrace API does not try to support this weirdness.
++ */
++ ptrace_notify_stop(task);
++
+ schedule();
+
+ utrace_finish_stop();
diff --git a/freed-ora/current/F-12/linux-2.6-utrace.patch b/freed-ora/current/F-12/linux-2.6-utrace.patch
new file mode 100644
index 000000000..8d1adf8c2
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-utrace.patch
@@ -0,0 +1,4163 @@
+utrace core
+
+This adds the utrace facility, a new modular interface in the kernel for
+implementing user thread tracing and debugging. This fits on top of the
+tracehook_* layer, so the new code is well-isolated.
+
+The new interface is in <linux/utrace.h> and the DocBook utrace book
+describes it. It allows for multiple separate tracing engines to work in
+parallel without interfering with each other. Higher-level tracing
+facilities can be implemented as loadable kernel modules using this layer.
+
+The new facility is made optional under CONFIG_UTRACE.
+When this is not enabled, no new code is added.
+It can only be enabled on machines that have all the
+prerequisites and select CONFIG_HAVE_ARCH_TRACEHOOK.
+
+In this initial version, utrace and ptrace do not play together at all.
+If ptrace is attached to a thread, the attach calls in the utrace kernel
+API return -EBUSY. If utrace is attached to a thread, the PTRACE_ATTACH
+or PTRACE_TRACEME request will return EBUSY to userland. The old ptrace
+code is otherwise unchanged and nothing using ptrace should be affected
+by this patch as long as utrace is not used at the same time. In the
+future we can clean up the ptrace implementation and rework it to use
+the utrace API.
+
+Signed-off-by: Roland McGrath <roland@redhat.com>
+---
+ Documentation/DocBook/Makefile | 2 +-
+ Documentation/DocBook/utrace.tmpl | 590 +++++++++
+ fs/proc/array.c | 3 +
+ include/linux/sched.h | 5 +
+ include/linux/tracehook.h | 87 ++-
+ include/linux/utrace.h | 692 +++++++++++
+ init/Kconfig | 9 +
+ kernel/Makefile | 1 +
+ kernel/fork.c | 3 +
+ kernel/ptrace.c | 14 +
+ kernel/utrace.c | 2436 +++++++++++++++++++++++++++++++++++++
+ 11 files changed, 3840 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
+index ab8300f..95f59e4 100644
+--- a/Documentation/DocBook/Makefile
++++ b/Documentation/DocBook/Makefile
+@@ -9,7 +9,7 @@
+ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
+ kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
+ procfs-guide.xml writing_usb_driver.xml networking.xml \
+- kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
++ kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml utrace.xml \
+ gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
+ genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
+ mac80211.xml debugobjects.xml sh.xml regulator.xml \
+diff --git a/Documentation/DocBook/utrace.tmpl b/Documentation/DocBook/utrace.tmpl
+new file mode 100644
+index ...e149f49 100644
+--- /dev/null
++++ b/Documentation/DocBook/utrace.tmpl
+@@ -0,0 +1,590 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
++"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
++
++<book id="utrace">
++ <bookinfo>
++ <title>The utrace User Debugging Infrastructure</title>
++ </bookinfo>
++
++ <toc></toc>
++
++ <chapter id="concepts"><title>utrace concepts</title>
++
++ <sect1 id="intro"><title>Introduction</title>
++
++ <para>
++ <application>utrace</application> is infrastructure code for tracing
++ and controlling user threads. This is the foundation for writing
++ tracing engines, which can be loadable kernel modules.
++ </para>
++
++ <para>
++ The basic actors in <application>utrace</application> are the thread
++ and the tracing engine. A tracing engine is some body of code that
++ calls into the <filename>&lt;linux/utrace.h&gt;</filename>
++ interfaces, represented by a <structname>struct
++ utrace_engine_ops</structname>. (Usually it's a kernel module,
++ though the legacy <function>ptrace</function> support is a tracing
++ engine that is not in a kernel module.) The interface operates on
++ individual threads (<structname>struct task_struct</structname>).
++ If an engine wants to treat several threads as a group, that is up
++ to its higher-level code.
++ </para>
++
++ <para>
++ Tracing begins by attaching an engine to a thread, using
++ <function>utrace_attach_task</function> or
++ <function>utrace_attach_pid</function>. If successful, it returns a
++ pointer that is the handle used in all other calls.
++ </para>
++
++ </sect1>
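A hedged engine-side sketch of the attach step described above (not from the patch). Only utrace_attach_task, UTRACE_ATTACH_CREATE, and utrace_engine_put are taken from this document; the empty ops structure relies on the rule, stated in the next section, that unused callbacks may be NULL.

    /* Sketch only: attach an (initially inert) engine to a task. */
    static const struct utrace_engine_ops demo_ops;	/* all callbacks NULL */

    static int demo_attach(struct task_struct *task)
    {
    	struct utrace_engine *engine;

    	engine = utrace_attach_task(task, UTRACE_ATTACH_CREATE,
    				    &demo_ops, NULL);
    	if (IS_ERR(engine))
    		return PTR_ERR(engine);

    	/*
    	 * The engine does nothing until utrace_set_events() requests
    	 * callbacks.  Drop our reference when done with the handle.
    	 */
    	utrace_engine_put(engine);
    	return 0;
    }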
++
++ <sect1 id="callbacks"><title>Events and Callbacks</title>
++
++ <para>
++ An attached engine does nothing by default. An engine makes something
++ happen by requesting callbacks via <function>utrace_set_events</function>
++ and poking the thread with <function>utrace_control</function>.
++ The synchronization issues related to these two calls
++ are discussed further below in <xref linkend="teardown"/>.
++ </para>
++
++ <para>
++ Events are specified using the macro
++ <constant>UTRACE_EVENT(<replaceable>type</replaceable>)</constant>.
++ Each event type is associated with a callback in <structname>struct
++ utrace_engine_ops</structname>. A tracing engine can leave unused
++ callbacks <constant>NULL</constant>. The only callbacks required
++ are those used by the event flags it sets.
++ </para>
++
++ <para>
++ Many engines can be attached to each thread. When a thread has an
++ event, each engine gets a callback if it has set the event flag for
++ that event type. For most events, engines are called in the order they
++ attached. Engines that attach after the event has occurred do not get
++ callbacks for that event. This includes any new engines just attached
++ by an existing engine's callback function. Once the sequence of
++ callbacks for that one event has completed, such new engines are then
++ eligible in the next sequence that starts when there is another event.
++ </para>
++
++ <para>
++ Event reporting callbacks have details particular to the event type,
++ but are all called in similar environments and have the same
++ constraints. Callbacks are made from safe points, where no locks
++ are held, no special resources are pinned (usually), and the
++ user-mode state of the thread is accessible. So, callback code has
++ a pretty free hand. But to be a good citizen, callback code should
++ never block for long periods. It is fine to block in
++ <function>kmalloc</function> and the like, but never wait for i/o or
++ for user mode to do something. If you need the thread to wait, use
++ <constant>UTRACE_STOP</constant> and return from the callback
++ quickly. When your i/o finishes or whatever, you can use
++ <function>utrace_control</function> to resume the thread.
++ </para>
++
++ <para>
++ The <constant>UTRACE_EVENT(SYSCALL_ENTRY)</constant> event is a special
++ case. While other events happen in the kernel when it will return to
++ user mode soon, this event happens when entering the kernel before it
++ will proceed with the work requested from user mode. Because of this
++ difference, the <function>report_syscall_entry</function> callback is
++ special in two ways. For this event, engines are called in reverse of
++ the normal order (this includes the <function>report_quiesce</function>
++ call that precedes a <function>report_syscall_entry</function> call).
++ This preserves the semantics that the last engine to attach is called
++ "closest to user mode"--the engine that is first to see a thread's user
++ state when it enters the kernel is also the last to see that state when
++ the thread returns to user mode. For the same reason, if these
++ callbacks use <constant>UTRACE_STOP</constant> (see the next section),
++ the thread stops immediately after callbacks rather than only when it's
++ ready to return to user mode; when allowed to resume, it will actually
++ attempt the system call indicated by the register values at that time.
++ </para>
++
++ </sect1>
++
++ <sect1 id="safely"><title>Stopping Safely</title>
++
++ <sect2 id="well-behaved"><title>Writing well-behaved callbacks</title>
++
++ <para>
++ Well-behaved callbacks are important to maintain two essential
++ properties of the interface. The first of these is that unrelated
++ tracing engines should not interfere with each other. If your engine's
++ event callback does not return quickly, then another engine won't get
++ the event notification in a timely manner. The second important
++ property is that tracing should be as noninvasive as possible to the
++ normal operation of the system overall and of the traced thread in
++ particular. That is, attached tracing engines should not perturb a
++ thread's behavior, except to the extent that changing its user-visible
++ state is explicitly what you want to do. (Obviously some perturbation
++ is unavoidable, primarily timing changes, ranging from small delays due
++ to the overhead of tracing, to arbitrary pauses in user code execution
++ when a user stops a thread with a debugger for examination.) Even when
++ you explicitly want the perturbation of making the traced thread block,
++ just blocking directly in your callback has more unwanted effects. For
++ example, the <constant>CLONE</constant> event callbacks are called when
++ the new child thread has been created but not yet started running; the
++ child can never be scheduled until the <constant>CLONE</constant>
++ tracing callbacks return. (This allows engines tracing the parent to
++ attach to the child.) If a <constant>CLONE</constant> event callback
++ blocks the parent thread, it also prevents the child thread from
++ running (even to process a <constant>SIGKILL</constant>). If what you
++ want is to make both the parent and child block, then use
++ <function>utrace_attach_task</function> on the child and then use
++ <constant>UTRACE_STOP</constant> on both threads. A more crucial
++ problem with blocking in callbacks is that it can prevent
++ <constant>SIGKILL</constant> from working. A thread that is blocking
++ due to <constant>UTRACE_STOP</constant> will still wake up and die
++ immediately when sent a <constant>SIGKILL</constant>, as all threads
++ should. Relying on the <application>utrace</application>
++ infrastructure rather than on private synchronization calls in event
++ callbacks is an important way to help keep tracing robustly
++ noninvasive.
++ </para>
++
++ </sect2>
++
++ <sect2 id="UTRACE_STOP"><title>Using <constant>UTRACE_STOP</constant></title>
++
++ <para>
++ To control another thread and access its state, it must be stopped
++ with <constant>UTRACE_STOP</constant>. This means that it is
++ stopped and won't start running again while we access it. When a
++ thread is not already stopped, <function>utrace_control</function>
++ returns <constant>-EINPROGRESS</constant> and an engine must wait
++ for an event callback when the thread is ready to stop. The thread
++ may be running on another CPU or may be blocked. When it is ready
++ to be examined, it will make callbacks to engines that set the
++ <constant>UTRACE_EVENT(QUIESCE)</constant> event bit. To wake up an
++ interruptible wait, use <constant>UTRACE_INTERRUPT</constant>.
++ </para>
++
++ <para>
++ As long as some engine has used <constant>UTRACE_STOP</constant> and
++ not called <function>utrace_control</function> to resume the thread,
++ then the thread will remain stopped. <constant>SIGKILL</constant>
++ will wake it up, but it will not run user code. When the stop is
++ cleared with <function>utrace_control</function> or a callback
++ return value, the thread starts running again.
++ (See also <xref linkend="teardown"/>.)
++ </para>
++
++ </sect2>
++
++ </sect1>
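A minimal sketch (not from the patch) of the stop/resume usage just described. It assumes the engine was attached as in the earlier sketch and that a QUIESCE callback, not shown here, is used to learn when an -EINPROGRESS stop has completed.

    /* Sketch only: ask a traced thread to stop, and later resume it. */
    static int demo_stop(struct task_struct *task, struct utrace_engine *engine)
    {
    	int ret = utrace_control(task, engine, UTRACE_STOP);

    	if (ret == -EINPROGRESS) {
    		/*
    		 * Not stopped yet: the thread is running or blocked.  A
    		 * UTRACE_EVENT(QUIESCE) callback fires once it reaches a
    		 * safe point; examine it from there (or after it stops).
    		 */
    		ret = 0;
    	}
    	return ret;
    }

    static void demo_resume(struct task_struct *task, struct utrace_engine *engine)
    {
    	utrace_control(task, engine, UTRACE_RESUME);
    }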
++
++ <sect1 id="teardown"><title>Tear-down Races</title>
++
++ <sect2 id="SIGKILL"><title>Primacy of <constant>SIGKILL</constant></title>
++ <para>
++ Ordinarily synchronization issues for tracing engines are kept fairly
++ straightforward by using <constant>UTRACE_STOP</constant>. You ask a
++ thread to stop, and then once it makes the
++ <function>report_quiesce</function> callback it cannot do anything else
++ that would result in another callback, until you let it with a
++ <function>utrace_control</function> call. This simple arrangement
++ avoids complex and error-prone code in each one of a tracing engine's
++ event callbacks to keep them serialized with the engine's other
++ operations done on that thread from another thread of control.
++ However, giving tracing engines complete power to keep a traced thread
++ stuck in place runs afoul of a more important kind of simplicity that
++ the kernel overall guarantees: nothing can prevent or delay
++ <constant>SIGKILL</constant> from making a thread die and release its
++ resources. To preserve this important property of
++ <constant>SIGKILL</constant>, it can, as a special case, break
++ <constant>UTRACE_STOP</constant> like nothing else normally can. This
++ includes both explicit <constant>SIGKILL</constant> signals and the
++ implicit <constant>SIGKILL</constant> sent to each other thread in the
++ same thread group by a thread doing an exec, or processing a fatal
++ signal, or making an <function>exit_group</function> system call. A
++ tracing engine can prevent a thread from beginning the exit or exec or
++ dying by signal (other than <constant>SIGKILL</constant>) if it is
++ attached to that thread, but once the operation begins, no tracing
++ engine can prevent or delay all other threads in the same thread group
++ dying.
++ </para>
++ </sect2>
++
++ <sect2 id="reap"><title>Final callbacks</title>
++ <para>
++ The <function>report_reap</function> callback is always the final event
++ in the life cycle of a traced thread. Tracing engines can use this as
++ the trigger to clean up their own data structures. The
++ <function>report_death</function> callback is always the penultimate
++ event a tracing engine might see; it's seen unless the thread was
++ already in the midst of dying when the engine attached. Many tracing
++ engines will have no interest in when a parent reaps a dead process,
++ and nothing they want to do with a zombie thread once it dies; for
++ them, the <function>report_death</function> callback is the natural
++ place to clean up data structures and detach. To facilitate writing
++ such engines robustly, given the asynchrony of
++ <constant>SIGKILL</constant>, and without error-prone manual
++ implementation of synchronization schemes, the
++ <application>utrace</application> infrastructure provides some special
++ guarantees about the <function>report_death</function> and
++ <function>report_reap</function> callbacks. It still takes some care
++ to be sure your tracing engine is robust to tear-down races, but these
++ rules make it reasonably straightforward and concise to handle a lot of
++ corner cases correctly.
++ </para>
++ </sect2>
++
++ <sect2 id="refcount"><title>Engine and task pointers</title>
++ <para>
++ The first sort of guarantee concerns the core data structures
++ themselves. <structname>struct utrace_engine</structname> is
++ a reference-counted data structure. While you hold a reference, an
++ engine pointer will always stay valid so that you can safely pass it to
++ any <application>utrace</application> call. Each call to
++ <function>utrace_attach_task</function> or
++ <function>utrace_attach_pid</function> returns an engine pointer with a
++ reference belonging to the caller. You own that reference until you
++ drop it using <function>utrace_engine_put</function>. There is an
++ implicit reference on the engine while it is attached. So if you drop
++ your only reference, and then use
++ <function>utrace_attach_task</function> without
++ <constant>UTRACE_ATTACH_CREATE</constant> to look up that same engine,
++ you will get the same pointer with a new reference to replace the one
++ you dropped, just like calling <function>utrace_engine_get</function>.
++ When an engine has been detached, either explicitly with
++ <constant>UTRACE_DETACH</constant> or implicitly after
++ <function>report_reap</function>, then any references you hold are all
++ that keep the old engine pointer alive.
++ </para>
++
++ <para>
++ There is nothing a kernel module can do to keep a <structname>struct
++ task_struct</structname> alive outside of
++ <function>rcu_read_lock</function>. When the task dies and is reaped
++ by its parent (or itself), that structure can be freed so that any
++ dangling pointers you have stored become invalid.
++ <application>utrace</application> will not prevent this, but it can
++ help you detect it safely. By definition, a task that has been reaped
++ has had all its engines detached. All
++ <application>utrace</application> calls can be safely called on a
++ detached engine if the caller holds a reference on that engine pointer,
++ even if the task pointer passed in the call is invalid. All calls
++ return <constant>-ESRCH</constant> for a detached engine, which tells
++ you that the task pointer you passed could be invalid now. Since
++ <function>utrace_control</function> and
++ <function>utrace_set_events</function> do not block, you can call those
++ inside a <function>rcu_read_lock</function> section and be sure after
++ they don't return <constant>-ESRCH</constant> that the task pointer is
++ still valid until <function>rcu_read_unlock</function>. The
++ infrastructure never holds task references of its own. Though neither
++ <function>rcu_read_lock</function> nor any other lock is held while
++ making a callback, it's always guaranteed that the <structname>struct
++ task_struct</structname> and the <structname>struct
++ utrace_engine</structname> passed as arguments remain valid
++ until the callback function returns.
++ </para>
++
++ <para>
++ The common means for safely holding task pointers that is available to
++ kernel modules is to use <structname>struct pid</structname>, which
++ permits <function>put_pid</function> from kernel modules. When using
++ that, the calls <function>utrace_attach_pid</function>,
++ <function>utrace_control_pid</function>,
++ <function>utrace_set_events_pid</function>, and
++ <function>utrace_barrier_pid</function> are available.
++ </para>
++ </sect2>
++
++ <sect2 id="reap-after-death">
++ <title>
++ Serialization of <constant>DEATH</constant> and <constant>REAP</constant>
++ </title>
++ <para>
++ The second guarantee is the serialization of
++ <constant>DEATH</constant> and <constant>REAP</constant> event
++ callbacks for a given thread. The actual reaping by the parent
++ (<function>release_task</function> call) can occur simultaneously
++ while the thread is still doing the final steps of dying, including
++ the <function>report_death</function> callback. If a tracing engine
++ has requested both <constant>DEATH</constant> and
++ <constant>REAP</constant> event reports, it's guaranteed that the
++ <function>report_reap</function> callback will not be made until
++ after the <function>report_death</function> callback has returned.
++ If the <function>report_death</function> callback itself detaches
++ from the thread, then the <function>report_reap</function> callback
++ will never be made. Thus it is safe for a
++ <function>report_death</function> callback to clean up data
++ structures and detach.
++ </para>
++ </sect2>
++
++ <sect2 id="interlock"><title>Interlock with final callbacks</title>
++ <para>
++ The final sort of guarantee is that a tracing engine will know for sure
++ whether or not the <function>report_death</function> and/or
++ <function>report_reap</function> callbacks will be made for a certain
++ thread. These tear-down races are disambiguated by the error return
++ values of <function>utrace_set_events</function> and
++ <function>utrace_control</function>. Normally
++ <function>utrace_control</function> called with
++ <constant>UTRACE_DETACH</constant> returns zero, and this means that no
++ more callbacks will be made. If the thread is in the midst of dying,
++ it returns <constant>-EALREADY</constant> to indicate that the
++ <constant>report_death</constant> callback may already be in progress;
++ when you get this error, you know that any cleanup your
++ <function>report_death</function> callback does is about to happen or
++ has just happened--note that if the <function>report_death</function>
++ callback does not detach, the engine remains attached until the thread
++ gets reaped. If the thread is in the midst of being reaped,
++ <function>utrace_control</function> returns <constant>-ESRCH</constant>
++ to indicate that the <function>report_reap</function> callback may
++ already be in progress; this means the engine is implicitly detached
++ when the callback completes. This makes it possible for a tracing
++ engine that has decided asynchronously to detach from a thread to
++ safely clean up its data structures, knowing that no
++ <function>report_death</function> or <function>report_reap</function>
++ callback will try to do the same. <constant>utrace_detach</constant>
++ returns <constant>-ESRCH</constant> when the <structname>struct
++ utrace_engine</structname> has already been detached, but is
++ still a valid pointer because of its reference count. A tracing engine
++ can use this to safely synchronize its own independent multiple threads
++ of control with each other and with its event callbacks that detach.
++ </para>
++
++ <para>
++ In the same vein, <function>utrace_set_events</function> normally
++ returns zero; if the target thread was stopped before the call, then
++ after a successful call, no event callbacks not requested in the new
++ flags will be made. It fails with <constant>-EALREADY</constant> if
++ you try to clear <constant>UTRACE_EVENT(DEATH)</constant> when the
++ <function>report_death</function> callback may already have begun, if
++ you try to clear <constant>UTRACE_EVENT(REAP)</constant> when the
++ <function>report_reap</function> callback may already have begun, or if
++ you try to newly set <constant>UTRACE_EVENT(DEATH)</constant> or
++ <constant>UTRACE_EVENT(QUIESCE)</constant> when the target is already
++ dead or dying. Like <function>utrace_control</function>, it returns
++ <constant>-ESRCH</constant> when the thread has already been detached
++ (including forcible detach on reaping). This lets the tracing engine
++ know for sure which event callbacks it will or won't see after
++ <function>utrace_set_events</function> has returned. By checking for
++ errors, it can know whether to clean up its data structures immediately
++ or to let its callbacks do the work.
++ </para>
++ </sect2>
++
++ <sect2 id="barrier"><title>Using <function>utrace_barrier</function></title>
++ <para>
++ When a thread is safely stopped, calling
++ <function>utrace_control</function> with <constant>UTRACE_DETACH</constant>
++ or calling <function>utrace_set_events</function> to disable some events
++ ensures synchronously that your engine won't get any more of the callbacks
++ that have been disabled (none at all when detaching). But these can also
++ be used while the thread is not stopped, when it might be simultaneously
++ making a callback to your engine. For this situation, these calls return
++ <constant>-EINPROGRESS</constant> when it's possible a callback is in
++ progress. If you are not prepared to have your old callbacks still run,
++ then you can synchronize to be sure all the old callbacks are finished,
++ using <function>utrace_barrier</function>. This is necessary if the
++ kernel module containing your callback code is going to be unloaded.
++ </para>
++ <para>
++ After using <constant>UTRACE_DETACH</constant> once, further calls to
++ <function>utrace_control</function> with the same engine pointer will
++ return <constant>-ESRCH</constant>. In contrast, after getting
++ <constant>-EINPROGRESS</constant> from
++ <function>utrace_set_events</function>, you can call
++ <function>utrace_set_events</function> again later and if it returns zero
++ then know the old callbacks have finished.
++ </para>
++ <para>
++ Unlike all other calls, <function>utrace_barrier</function> (and
++ <function>utrace_barrier_pid</function>) will accept any engine pointer you
++ hold a reference on, even if <constant>UTRACE_DETACH</constant> has already
++ been used. After any <function>utrace_control</function> or
++ <function>utrace_set_events</function> call (these do not block), you can
++ call <function>utrace_barrier</function> to block until callbacks have
++ finished. This returns <constant>-ESRCH</constant> only if the engine is
++ completely detached (finished all callbacks). Otherwise it waits
++ until the thread is definitely not in the midst of a callback to this
++ engine and then returns zero, but can return
++ <constant>-ERESTARTSYS</constant> if its wait is interrupted.
++ </para>
++ </sect2>
++
++</sect1>
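One way (sketched here, not from the patch) to combine the error-return rules above into a detach path that is robust to tear-down races. The (task, engine) argument order for utrace_barrier is an assumption modelled on the other calls quoted in this document.

    /* Sketch only: detach and make sure no callback into this engine is
     * still running before the containing module may be unloaded. */
    static int demo_detach(struct task_struct *task, struct utrace_engine *engine)
    {
    	int ret = utrace_control(task, engine, UTRACE_DETACH);

    	switch (ret) {
    	case 0:			/* detached, no more callbacks */
    	case -EALREADY:		/* report_death may be running or done */
    	case -ESRCH:		/* already detached (or being reaped) */
    	case -EINPROGRESS:	/* a callback may still be in progress */
    		break;
    	default:
    		return ret;
    	}

    	/* Wait for any in-flight callback; may return -ERESTARTSYS if
    	 * interrupted, per the text above.  Argument order is assumed. */
    	ret = utrace_barrier(task, engine);
    	if (ret == -ESRCH)	/* fully detached: nothing in flight */
    		ret = 0;

    	utrace_engine_put(engine);	/* drop our reference on the handle */
    	return ret;
    }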
++
++</chapter>
++
++<chapter id="core"><title>utrace core API</title>
++
++<para>
++ The utrace API is declared in <filename>&lt;linux/utrace.h&gt;</filename>.
++</para>
++
++!Iinclude/linux/utrace.h
++!Ekernel/utrace.c
++
++</chapter>
++
++<chapter id="machine"><title>Machine State</title>
++
++<para>
++ The <function>task_current_syscall</function> function can be used on any
++ valid <structname>struct task_struct</structname> at any time, and does
++ not even require that <function>utrace_attach_task</function> was used at all.
++</para>
++
++<para>
++ The other ways to access the registers and other machine-dependent state of
++ a task can only be used on a task that is at a known safe point. The safe
++ points are all the places where <function>utrace_set_events</function> can
++ request callbacks (except for the <constant>DEATH</constant> and
++ <constant>REAP</constant> events). So at any event callback, it is safe to
++ examine <varname>current</varname>.
++</para>
++
++<para>
++ One task can examine another only after a callback in the target task that
++ returns <constant>UTRACE_STOP</constant> so that task will not return to user
++ mode after the safe point. This guarantees that the task will not resume
++ until the same engine uses <function>utrace_control</function>, unless the
++ task dies suddenly. To examine safely, one must use a pair of calls to
++ <function>utrace_prepare_examine</function> and
++ <function>utrace_finish_examine</function> surrounding the calls to
++ <structname>struct user_regset</structname> functions or direct examination
++ of task data structures. <function>utrace_prepare_examine</function> returns
++ an error if the task is not properly stopped, or is dead. After a
++ successful examination, the paired <function>utrace_finish_examine</function>
++ call returns an error if the task ever woke up during the examination. If
++ so, any data gathered may be scrambled and should be discarded. This means
++ there was a spurious wake-up (which should not happen), or a sudden death.
++</para>
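A sketch (not from the patch) of the prepare/finish pairing described above. The struct utrace_examiner cookie type and the utrace_finish_examine signature are assumptions; only the utrace_prepare_examine call form is quoted earlier in this patch.

    /* Sketch only: safely sample another task's state while it is held
     * stopped by UTRACE_STOP. */
    static int demo_examine(struct task_struct *task, struct utrace_engine *engine)
    {
    	struct utrace_examiner exam;	/* assumed cookie type */
    	int ret;

    	ret = utrace_prepare_examine(task, engine, &exam);
    	if (ret)
    		return ret;	/* not stopped for us, or already dead */

    	/* ... read registers via the user_regset interfaces here ... */

    	ret = utrace_finish_examine(task, engine, &exam);
    	if (ret)
    		return ret;	/* it woke up: discard whatever was read */
    	return 0;
    }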
++
++<sect1 id="regset"><title><structname>struct user_regset</structname></title>
++
++<para>
++ The <structname>struct user_regset</structname> API
++ is declared in <filename>&lt;linux/regset.h&gt;</filename>.
++</para>
++
++!Finclude/linux/regset.h
++
++</sect1>
++
++<sect1 id="task_current_syscall">
++ <title><filename>System Call Information</filename></title>
++
++<para>
++ This function is declared in <filename>&lt;linux/ptrace.h&gt;</filename>.
++</para>
++
++!Elib/syscall.c
++
++</sect1>
++
++<sect1 id="syscall"><title><filename>System Call Tracing</filename></title>
++
++<para>
++ The arch API for system call information is declared in
++ <filename>&lt;asm/syscall.h&gt;</filename>.
++ Each of these calls can be used only at system call entry tracing,
++ or can be used only at system call exit and the subsequent safe points
++ before returning to user mode.
++ At system call entry tracing means either during a
++ <structfield>report_syscall_entry</structfield> callback,
++ or any time after that callback has returned <constant>UTRACE_STOP</constant>.
++</para>
++
++!Finclude/asm-generic/syscall.h
++
++</sect1>
++
++</chapter>
++
++<chapter id="internals"><title>Kernel Internals</title>
++
++<para>
++ This chapter covers the interface to the tracing infrastructure
++ from the core of the kernel and the architecture-specific code.
++ This is for maintainers of the kernel and arch code, and not relevant
++ to using the tracing facilities described in preceding chapters.
++</para>
++
++<sect1 id="tracehook"><title>Core Calls In</title>
++
++<para>
++ These calls are declared in <filename>&lt;linux/tracehook.h&gt;</filename>.
++ The core kernel calls these functions at various important places.
++</para>
++
++!Finclude/linux/tracehook.h
++
++</sect1>
++
++<sect1 id="arch"><title>Architecture Calls Out</title>
++
++<para>
++ An arch that has done all these things sets
++ <constant>CONFIG_HAVE_ARCH_TRACEHOOK</constant>.
++ This is required to enable the <application>utrace</application> code.
++</para>
++
++<sect2 id="arch-ptrace"><title><filename>&lt;asm/ptrace.h&gt;</filename></title>
++
++<para>
++ An arch defines these in <filename>&lt;asm/ptrace.h&gt;</filename>
++ if it supports hardware single-step or block-step features.
++</para>
++
++!Finclude/linux/ptrace.h arch_has_single_step arch_has_block_step
++!Finclude/linux/ptrace.h user_enable_single_step user_enable_block_step
++!Finclude/linux/ptrace.h user_disable_single_step
++
++</sect2>
++
++<sect2 id="arch-syscall">
++ <title><filename>&lt;asm/syscall.h&gt;</filename></title>
++
++ <para>
++ An arch provides <filename>&lt;asm/syscall.h&gt;</filename> that
++ defines these as inlines, or declares them as exported functions.
++ These interfaces are described in <xref linkend="syscall"/>.
++ </para>
++
++</sect2>
++
++<sect2 id="arch-tracehook">
++ <title><filename>&lt;linux/tracehook.h&gt;</filename></title>
++
++ <para>
++ An arch must define <constant>TIF_NOTIFY_RESUME</constant>
++ and <constant>TIF_SYSCALL_TRACE</constant>
++ in its <filename>&lt;asm/thread_info.h&gt;</filename>.
++ The arch code must call the following functions, all declared
++ in <filename>&lt;linux/tracehook.h&gt;</filename> and
++ described in <xref linkend="tracehook"/>:
++
++ <itemizedlist>
++ <listitem>
++ <para><function>tracehook_notify_resume</function></para>
++ </listitem>
++ <listitem>
++ <para><function>tracehook_report_syscall_entry</function></para>
++ </listitem>
++ <listitem>
++ <para><function>tracehook_report_syscall_exit</function></para>
++ </listitem>
++ <listitem>
++ <para><function>tracehook_signal_handler</function></para>
++ </listitem>
++ </itemizedlist>
++
++ </para>
++
++</sect2>
++
++</sect1>
++
++</chapter>
++
++</book>
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 822c2d5..9069c91 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -82,6 +82,7 @@
+ #include <linux/pid_namespace.h>
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
++#include <linux/utrace.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -189,6 +190,8 @@ static inline void task_state(struct seq
+ cred->uid, cred->euid, cred->suid, cred->fsuid,
+ cred->gid, cred->egid, cred->sgid, cred->fsgid);
+
++ task_utrace_proc_status(m, p);
++
+ task_lock(p);
+ if (p->files)
+ fdt = files_fdtable(p->files);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 6c8928b..139d300 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1393,6 +1393,11 @@ struct task_struct {
+ #endif
+ seccomp_t seccomp;
+
++#ifdef CONFIG_UTRACE
++ struct utrace *utrace;
++ unsigned long utrace_flags;
++#endif
++
+ /* Thread group tracking */
+ u32 parent_exec_id;
+ u32 self_exec_id;
+diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
+index c78b2f4..71fa250 100644
+--- a/include/linux/tracehook.h
++++ b/include/linux/tracehook.h
+@@ -49,6 +49,7 @@
+ #include <linux/sched.h>
+ #include <linux/ptrace.h>
+ #include <linux/security.h>
++#include <linux/utrace.h>
+ struct linux_binprm;
+
+ /**
+@@ -63,6 +64,8 @@ struct linux_binprm;
+ */
+ static inline int tracehook_expect_breakpoints(struct task_struct *task)
+ {
++ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_CORE)))
++ return 1;
+ return (task_ptrace(task) & PT_PTRACED) != 0;
+ }
+
+@@ -111,6 +114,9 @@ static inline void ptrace_report_syscall
+ static inline __must_check int tracehook_report_syscall_entry(
+ struct pt_regs *regs)
+ {
++ if ((task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_ENTRY)) &&
++ utrace_report_syscall_entry(regs))
++ return 1;
+ ptrace_report_syscall(regs);
+ return 0;
+ }
+@@ -134,6 +140,9 @@ static inline __must_check int tracehook
+ */
+ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
+ {
++ if (task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_EXIT))
++ utrace_report_syscall_exit(regs);
++
+ if (step && (task_ptrace(current) & PT_PTRACED)) {
+ siginfo_t info;
+ user_single_step_siginfo(current, regs, &info);
+@@ -201,6 +210,8 @@ static inline void tracehook_report_exec
+ struct linux_binprm *bprm,
+ struct pt_regs *regs)
+ {
++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXEC)))
++ utrace_report_exec(fmt, bprm, regs);
+ if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
+ unlikely(task_ptrace(current) & PT_PTRACED))
+ send_sig(SIGTRAP, current, 0);
+@@ -218,10 +229,37 @@ static inline void tracehook_report_exec
+ */
+ static inline void tracehook_report_exit(long *exit_code)
+ {
++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXIT)))
++ utrace_report_exit(exit_code);
+ ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
+ }
+
+ /**
++ * tracehook_init_task - task_struct has just been copied
++ * @task: new &struct task_struct just copied from parent
++ *
++ * Called from do_fork() when @task has just been duplicated.
++ * After this, @task will be passed to tracehook_free_task()
++ * even if the rest of its setup fails before it is fully created.
++ */
++static inline void tracehook_init_task(struct task_struct *task)
++{
++ utrace_init_task(task);
++}
++
++/**
++ * tracehook_free_task - task_struct is being freed
++ * @task: dead &struct task_struct being freed
++ *
++ * Called from free_task() when @task is no longer in use.
++ */
++static inline void tracehook_free_task(struct task_struct *task)
++{
++ if (task_utrace_struct(task))
++ utrace_free_task(task);
++}
++
++/**
+ * tracehook_prepare_clone - prepare for new child to be cloned
+ * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
+ *
+@@ -285,6 +323,8 @@ static inline void tracehook_report_clon
+ unsigned long clone_flags,
+ pid_t pid, struct task_struct *child)
+ {
++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)))
++ utrace_report_clone(clone_flags, child);
+ if (unlikely(task_ptrace(child))) {
+ /*
+ * It doesn't matter who attached/attaching to this
+@@ -317,6 +357,9 @@ static inline void tracehook_report_clon
+ pid_t pid,
+ struct task_struct *child)
+ {
++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)) &&
++ (clone_flags & CLONE_VFORK))
++ utrace_finish_vfork(current);
+ if (unlikely(trace))
+ ptrace_event(0, trace, pid);
+ }
+@@ -351,6 +394,10 @@ static inline void tracehook_report_vfor
+ */
+ static inline void tracehook_prepare_release_task(struct task_struct *task)
+ {
++ /* see utrace_add_engine() about this barrier */
++ smp_mb();
++ if (task_utrace_flags(task))
++ utrace_maybe_reap(task, task_utrace_struct(task), true);
+ }
+
+ /**
+@@ -365,6 +412,7 @@ static inline void tracehook_prepare_rel
+ static inline void tracehook_finish_release_task(struct task_struct *task)
+ {
+ ptrace_release_task(task);
++ BUG_ON(task->exit_state != EXIT_DEAD);
+ }
+
+ /**
+@@ -386,6 +434,8 @@ static inline void tracehook_signal_hand
+ const struct k_sigaction *ka,
+ struct pt_regs *regs, int stepping)
+ {
++ if (task_utrace_flags(current))
++ utrace_signal_handler(current, stepping);
+ if (stepping && (task_ptrace(current) & PT_PTRACED))
+ ptrace_notify(SIGTRAP);
+ }
+@@ -403,6 +453,8 @@ static inline void tracehook_signal_hand
+ static inline int tracehook_consider_ignored_signal(struct task_struct *task,
+ int sig)
+ {
++ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_IGN)))
++ return 1;
+ return (task_ptrace(task) & PT_PTRACED) != 0;
+ }
+
+@@ -422,6 +474,9 @@ static inline int tracehook_consider_ign
+ static inline int tracehook_consider_fatal_signal(struct task_struct *task,
+ int sig)
+ {
++ if (unlikely(task_utrace_flags(task) & (UTRACE_EVENT(SIGNAL_TERM) |
++ UTRACE_EVENT(SIGNAL_CORE))))
++ return 1;
+ return (task_ptrace(task) & PT_PTRACED) != 0;
+ }
+
+@@ -436,6 +491,8 @@ static inline int tracehook_consider_fat
+ */
+ static inline int tracehook_force_sigpending(void)
+ {
++ if (unlikely(task_utrace_flags(current)))
++ return utrace_interrupt_pending();
+ return 0;
+ }
+
+@@ -465,6 +522,8 @@ static inline int tracehook_get_signal(s
+ siginfo_t *info,
+ struct k_sigaction *return_ka)
+ {
++ if (unlikely(task_utrace_flags(task)))
++ return utrace_get_signal(task, regs, info, return_ka);
+ return 0;
+ }
+
+@@ -492,6 +551,8 @@ static inline int tracehook_get_signal(s
+ */
+ static inline int tracehook_notify_jctl(int notify, int why)
+ {
++ if (task_utrace_flags(current) & UTRACE_EVENT(JCTL))
++ utrace_report_jctl(notify, why);
+ return notify ?: task_ptrace(current) ? why : 0;
+ }
+
+@@ -502,6 +563,8 @@ static inline int tracehook_notify_jctl(
+ */
+ static inline void tracehook_finish_jctl(void)
+ {
++ if (task_utrace_flags(current))
++ utrace_finish_stop();
+ }
+
+ #define DEATH_REAP -1
+@@ -524,6 +587,8 @@ static inline void tracehook_finish_jctl
+ static inline int tracehook_notify_death(struct task_struct *task,
+ void **death_cookie, int group_dead)
+ {
++ *death_cookie = task_utrace_struct(task);
++
+ if (task_detached(task))
+ return task->ptrace ? SIGCHLD : DEATH_REAP;
+
+@@ -560,6 +625,15 @@ static inline void tracehook_report_deat
+ int signal, void *death_cookie,
+ int group_dead)
+ {
++ /*
++ * If utrace_set_events() was just called to enable
++ * UTRACE_EVENT(DEATH), then we are obliged to call
++ * utrace_report_death() and not miss it. utrace_set_events()
++ * checks @task->exit_state under tasklist_lock to synchronize
++ * with exit_notify(), the caller.
++ */
++ if (task_utrace_flags(task) & _UTRACE_DEATH_EVENTS)
++ utrace_report_death(task, death_cookie, group_dead, signal);
+ }
+
+ #ifdef TIF_NOTIFY_RESUME
+@@ -589,10 +663,21 @@ static inline void set_notify_resume(str
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+- * Called without locks.
++ * Called without locks. However, on some machines this may be
++ * called with interrupts disabled.
+ */
+ static inline void tracehook_notify_resume(struct pt_regs *regs)
+ {
++ struct task_struct *task = current;
++ /*
++ * Prevent the following store/load from getting ahead of the
++ * caller which clears TIF_NOTIFY_RESUME. This pairs with the
++ * implicit mb() before setting TIF_NOTIFY_RESUME in
++ * set_notify_resume().
++ */
++ smp_mb();
++ if (task_utrace_flags(task))
++ utrace_resume(task, regs);
+ }
+ #endif /* TIF_NOTIFY_RESUME */
+
+diff --git a/include/linux/utrace.h b/include/linux/utrace.h
+new file mode 100644
+index ...f251efe 100644
+--- /dev/null
++++ b/include/linux/utrace.h
+@@ -0,0 +1,692 @@
++/*
++ * utrace infrastructure interface for debugging user processes
++ *
++ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ *
++ * This interface allows for notification of interesting events in a
++ * thread. It also mediates access to thread state such as registers.
++ * Multiple unrelated users can be associated with a single thread.
++ * We call each of these a tracing engine.
++ *
++ * A tracing engine starts by calling utrace_attach_task() or
++ * utrace_attach_pid() on the chosen thread, passing in a set of hooks
++ * (&struct utrace_engine_ops), and some associated data. This produces a
++ * &struct utrace_engine, which is the handle used for all other
++ * operations. An attached engine has its ops vector, its data, and an
++ * event mask controlled by utrace_set_events().
++ *
++ * For each event bit that is set, that engine will get the
++ * appropriate ops->report_*() callback when the event occurs. The
++ * &struct utrace_engine_ops need not provide callbacks for an event
++ * unless the engine sets one of the associated event bits.
++ */
++
++#ifndef _LINUX_UTRACE_H
++#define _LINUX_UTRACE_H 1
++
++#include <linux/list.h>
++#include <linux/kref.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++
++struct linux_binfmt;
++struct linux_binprm;
++struct pt_regs;
++struct seq_file;
++struct utrace;
++struct user_regset;
++struct user_regset_view;
++
++/*
++ * Event bits passed to utrace_set_events().
++ * These appear in &struct task_struct.@utrace_flags
++ * and &struct utrace_engine.@flags.
++ */
++enum utrace_events {
++ _UTRACE_EVENT_QUIESCE, /* Thread is available for examination. */
++ _UTRACE_EVENT_REAP, /* Zombie reaped, no more tracing possible. */
++ _UTRACE_EVENT_CLONE, /* Successful clone/fork/vfork just done. */
++ _UTRACE_EVENT_EXEC, /* Successful execve just completed. */
++ _UTRACE_EVENT_EXIT, /* Thread exit in progress. */
++ _UTRACE_EVENT_DEATH, /* Thread has died. */
++ _UTRACE_EVENT_SYSCALL_ENTRY, /* User entered kernel for system call. */
++ _UTRACE_EVENT_SYSCALL_EXIT, /* Returning to user after system call. */
++ _UTRACE_EVENT_SIGNAL, /* Signal delivery will run a user handler. */
++ _UTRACE_EVENT_SIGNAL_IGN, /* No-op signal to be delivered. */
++ _UTRACE_EVENT_SIGNAL_STOP, /* Signal delivery will suspend. */
++ _UTRACE_EVENT_SIGNAL_TERM, /* Signal delivery will terminate. */
++ _UTRACE_EVENT_SIGNAL_CORE, /* Signal delivery will dump core. */
++ _UTRACE_EVENT_JCTL, /* Job control stop or continue completed. */
++ _UTRACE_NEVENTS
++};
++#define UTRACE_EVENT(type) (1UL << _UTRACE_EVENT_##type)
++
++/*
++ * All the kinds of signal events.
++ * These all use the @report_signal() callback.
++ */
++#define UTRACE_EVENT_SIGNAL_ALL (UTRACE_EVENT(SIGNAL) \
++ | UTRACE_EVENT(SIGNAL_IGN) \
++ | UTRACE_EVENT(SIGNAL_STOP) \
++ | UTRACE_EVENT(SIGNAL_TERM) \
++ | UTRACE_EVENT(SIGNAL_CORE))
++/*
++ * Both kinds of syscall events; these call the @report_syscall_entry()
++ * and @report_syscall_exit() callbacks, respectively.
++ */
++#define UTRACE_EVENT_SYSCALL \
++ (UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT))
++
++/*
++ * The event reports triggered synchronously by task death.
++ */
++#define _UTRACE_DEATH_EVENTS (UTRACE_EVENT(DEATH) | UTRACE_EVENT(QUIESCE))
++
++/*
++ * Hooks in <linux/tracehook.h> call these entry points to the utrace dispatch.
++ */
++void utrace_free_task(struct task_struct *);
++bool utrace_interrupt_pending(void);
++void utrace_resume(struct task_struct *, struct pt_regs *);
++void utrace_finish_stop(void);
++void utrace_maybe_reap(struct task_struct *, struct utrace *, bool);
++int utrace_get_signal(struct task_struct *, struct pt_regs *,
++ siginfo_t *, struct k_sigaction *);
++void utrace_report_clone(unsigned long, struct task_struct *);
++void utrace_finish_vfork(struct task_struct *);
++void utrace_report_exit(long *exit_code);
++void utrace_report_death(struct task_struct *, struct utrace *, bool, int);
++void utrace_report_jctl(int notify, int type);
++void utrace_report_exec(struct linux_binfmt *, struct linux_binprm *,
++ struct pt_regs *regs);
++bool utrace_report_syscall_entry(struct pt_regs *);
++void utrace_report_syscall_exit(struct pt_regs *);
++void utrace_signal_handler(struct task_struct *, int);
++
++#ifndef CONFIG_UTRACE
++
++/*
++ * <linux/tracehook.h> uses these accessors to avoid #ifdef CONFIG_UTRACE.
++ */
++static inline unsigned long task_utrace_flags(struct task_struct *task)
++{
++ return 0;
++}
++static inline struct utrace *task_utrace_struct(struct task_struct *task)
++{
++ return NULL;
++}
++static inline void utrace_init_task(struct task_struct *child)
++{
++}
++
++static inline void task_utrace_proc_status(struct seq_file *m,
++ struct task_struct *p)
++{
++}
++
++#else /* CONFIG_UTRACE */
++
++static inline unsigned long task_utrace_flags(struct task_struct *task)
++{
++ return task->utrace_flags;
++}
++
++static inline struct utrace *task_utrace_struct(struct task_struct *task)
++{
++ struct utrace *utrace;
++
++ /*
++ * This barrier ensures that any prior load of task->utrace_flags
++ * is ordered before this load of task->utrace. We use those
++ * utrace_flags checks in the hot path to decide to call into
++ * the utrace code. The first attach installs task->utrace before
++ * setting task->utrace_flags nonzero with implicit barrier in
++ * between, see utrace_add_engine().
++ */
++ smp_rmb();
++ utrace = task->utrace;
++
++ smp_read_barrier_depends(); /* See utrace_task_alloc(). */
++ return utrace;
++}
++
++static inline void utrace_init_task(struct task_struct *task)
++{
++ task->utrace_flags = 0;
++ task->utrace = NULL;
++}
++
++void task_utrace_proc_status(struct seq_file *m, struct task_struct *p);
++
++
++/*
++ * Version number of the API defined in this file. This will change
++ * whenever a tracing engine's code would need some updates to keep
++ * working. We maintain this here for the benefit of tracing engine code
++ * that is developed concurrently with utrace API improvements before they
++ * are merged into the kernel, making LINUX_VERSION_CODE checks unwieldy.
++ */
++#define UTRACE_API_VERSION 20091216
++
++/**
++ * enum utrace_resume_action - engine's choice of action for a traced task
++ * @UTRACE_STOP: Stay quiescent after callbacks.
++ * @UTRACE_INTERRUPT: Make @report_signal() callback soon.
++ * @UTRACE_REPORT: Make some callback soon.
++ * @UTRACE_SINGLESTEP: Resume in user mode for one instruction.
++ * @UTRACE_BLOCKSTEP: Resume in user mode until next branch.
++ * @UTRACE_RESUME: Resume normally in user mode.
++ * @UTRACE_DETACH: Detach my engine (implies %UTRACE_RESUME).
++ *
++ * See utrace_control() for detailed descriptions of each action. This is
++ * encoded in the @action argument and the return value for every callback
++ * with a &u32 return value.
++ *
++ * The order of these is important. When there is more than one engine,
++ * each supplies its choice and the smallest value prevails.
++ */
++enum utrace_resume_action {
++ UTRACE_STOP,
++ UTRACE_INTERRUPT,
++ UTRACE_REPORT,
++ UTRACE_SINGLESTEP,
++ UTRACE_BLOCKSTEP,
++ UTRACE_RESUME,
++ UTRACE_DETACH,
++ UTRACE_RESUME_MAX
++};
++#define UTRACE_RESUME_BITS (ilog2(UTRACE_RESUME_MAX) + 1)
++#define UTRACE_RESUME_MASK ((1 << UTRACE_RESUME_BITS) - 1)
++
++/**
++ * utrace_resume_action - &enum utrace_resume_action from callback action
++ * @action: &u32 callback @action argument or return value
++ *
++ * This extracts the &enum utrace_resume_action from @action,
++ * which is the @action argument to a &struct utrace_engine_ops
++ * callback or the return value from one.
++ */
++static inline enum utrace_resume_action utrace_resume_action(u32 action)
++{
++ return action & UTRACE_RESUME_MASK;
++}
++
++/**
++ * enum utrace_signal_action - disposition of signal
++ * @UTRACE_SIGNAL_DELIVER: Deliver according to sigaction.
++ * @UTRACE_SIGNAL_IGN: Ignore the signal.
++ * @UTRACE_SIGNAL_TERM: Terminate the process.
++ * @UTRACE_SIGNAL_CORE: Terminate with core dump.
++ * @UTRACE_SIGNAL_STOP: Deliver as absolute stop.
++ * @UTRACE_SIGNAL_TSTP: Deliver as job control stop.
++ * @UTRACE_SIGNAL_REPORT: Reporting before pending signals.
++ * @UTRACE_SIGNAL_HANDLER: Reporting after signal handler setup.
++ *
++ * This is encoded in the @action argument and the return value for
++ * a @report_signal() callback. It says what will happen to the
++ * signal described by the &siginfo_t parameter to the callback.
++ *
++ * The %UTRACE_SIGNAL_REPORT value is used in an @action argument when
++ * a tracing report is being made before dequeuing any pending signal.
++ * If this is immediately after a signal handler has been set up, then
++ * %UTRACE_SIGNAL_HANDLER is used instead. A @report_signal callback
++ * that uses %UTRACE_SIGNAL_DELIVER|%UTRACE_SINGLESTEP will ensure
++ * it sees a %UTRACE_SIGNAL_HANDLER report.
++ */
++enum utrace_signal_action {
++ UTRACE_SIGNAL_DELIVER = 0x00,
++ UTRACE_SIGNAL_IGN = 0x10,
++ UTRACE_SIGNAL_TERM = 0x20,
++ UTRACE_SIGNAL_CORE = 0x30,
++ UTRACE_SIGNAL_STOP = 0x40,
++ UTRACE_SIGNAL_TSTP = 0x50,
++ UTRACE_SIGNAL_REPORT = 0x60,
++ UTRACE_SIGNAL_HANDLER = 0x70
++};
++#define UTRACE_SIGNAL_MASK 0xf0
++#define UTRACE_SIGNAL_HOLD 0x100 /* Flag, push signal back on queue. */
++
++/**
++ * utrace_signal_action - &enum utrace_signal_action from callback action
++ * @action: @report_signal callback @action argument or return value
++ *
++ * This extracts the &enum utrace_signal_action from @action, which
++ * is the @action argument to a @report_signal callback or the
++ * return value from one.
++ */
++static inline enum utrace_signal_action utrace_signal_action(u32 action)
++{
++ return action & UTRACE_SIGNAL_MASK;
++}
++
++/**
++ * enum utrace_syscall_action - disposition of system call attempt
++ * @UTRACE_SYSCALL_RUN: Run the system call.
++ * @UTRACE_SYSCALL_ABORT: Don't run the system call.
++ *
++ * This is encoded in the @action argument and the return value for
++ * a @report_syscall_entry callback.
++ */
++enum utrace_syscall_action {
++ UTRACE_SYSCALL_RUN = 0x00,
++ UTRACE_SYSCALL_ABORT = 0x10
++};
++#define UTRACE_SYSCALL_MASK 0xf0
++#define UTRACE_SYSCALL_RESUMED 0x100 /* Flag, report_syscall_entry() repeats */
++
++/**
++ * utrace_syscall_action - &enum utrace_syscall_action from callback action
++ * @action: @report_syscall_entry callback @action or return value
++ *
++ * This extracts the &enum utrace_syscall_action from @action, which
++ * is the @action argument to a @report_syscall_entry callback or the
++ * return value from one.
++ */
++static inline enum utrace_syscall_action utrace_syscall_action(u32 action)
++{
++ return action & UTRACE_SYSCALL_MASK;
++}
++
++/*
++ * Flags for utrace_attach_task() and utrace_attach_pid().
++ */
++#define UTRACE_ATTACH_MATCH_OPS 0x0001 /* Match engines on ops. */
++#define UTRACE_ATTACH_MATCH_DATA 0x0002 /* Match engines on data. */
++#define UTRACE_ATTACH_MATCH_MASK 0x000f
++#define UTRACE_ATTACH_CREATE 0x0010 /* Attach a new engine. */
++#define UTRACE_ATTACH_EXCLUSIVE 0x0020 /* Refuse if existing match. */
++
++/**
++ * struct utrace_engine - per-engine structure
++ * @ops: &struct utrace_engine_ops pointer passed to utrace_attach_task()
++ * @data: engine-private &void * passed to utrace_attach_task()
++ * @flags: event mask set by utrace_set_events() plus internal flag bits
++ *
++ * The task itself never has to worry about engines detaching while
++ * it's doing event callbacks. These structures are removed from the
++ * task's active list only when it's stopped, or by the task itself.
++ *
++ * utrace_engine_get() and utrace_engine_put() maintain a reference count.
++ * When it drops to zero, the structure is freed. One reference is held
++ * implicitly while the engine is attached to its task.
++ */
++struct utrace_engine {
++/* private: */
++ struct kref kref;
++ void (*release)(void *);
++ struct list_head entry;
++
++/* public: */
++ const struct utrace_engine_ops *ops;
++ void *data;
++
++ unsigned long flags;
++};
++
++/**
++ * utrace_engine_get - acquire a reference on a &struct utrace_engine
++ * @engine: &struct utrace_engine pointer
++ *
++ * You must hold a reference on @engine, and you get another.
++ */
++static inline void utrace_engine_get(struct utrace_engine *engine)
++{
++ kref_get(&engine->kref);
++}
++
++void __utrace_engine_release(struct kref *);
++
++/**
++ * utrace_engine_put - release a reference on a &struct utrace_engine
++ * @engine: &struct utrace_engine pointer
++ *
++ * You must hold a reference on @engine, and you lose that reference.
++ * If it was the last one, @engine becomes an invalid pointer.
++ */
++static inline void utrace_engine_put(struct utrace_engine *engine)
++{
++ kref_put(&engine->kref, __utrace_engine_release);
++}
++
++/**
++ * struct utrace_engine_ops - tracing engine callbacks
++ *
++ * Each @report_*() callback corresponds to an %UTRACE_EVENT(*) bit.
++ * utrace_set_events() calls on @engine choose which callbacks will
++ * be made to @engine from @task.
++ *
++ * Most callbacks take an @action argument, giving the resume action
++ * chosen by other tracing engines. All callbacks take an @engine
++ * argument. The @report_reap callback takes a @task argument that
++ * might or might not be @current. All other @report_* callbacks
++ * report an event in the @current task.
++ *
++ * For some calls, @action also includes bits specific to that event
++ * and utrace_resume_action() is used to extract the resume action.
++ * This shows what would happen if @engine wasn't there, or will if
++ * This shows what would happen if @engine wasn't there, or what will
++ * happen if the callback's return value uses %UTRACE_RESUME. This always
++ * this task.
++ *
++ * All return values contain &enum utrace_resume_action bits. For
++ * some calls, other bits specific to that kind of event are added to
++ * the resume action bits with OR. These are the same bits used in
++ * the @action argument. The resume action returned by a callback
++ * does not override previous engines' choices, it only says what
++ * @engine wants done. What @current actually does is the action that's
++ * most constrained among the choices made by all attached engines.
++ * See utrace_control() for more information on the actions.
++ *
++ * When %UTRACE_STOP is used in @report_syscall_entry, then @current
++ * stops before attempting the system call. In this case, another
++ * @report_syscall_entry callback will follow after @current resumes if
++ * %UTRACE_REPORT or %UTRACE_INTERRUPT was returned by some callback
++ * or passed to utrace_control(). In a second or later callback,
++ * %UTRACE_SYSCALL_RESUMED is set in the @action argument to indicate
++ * a repeat callback still waiting to attempt the same system call
++ * invocation. This repeat callback gives each engine an opportunity
++ * to reexamine registers another engine might have changed while
++ * @current was held in %UTRACE_STOP.
++ *
++ * In other cases, the resume action does not take effect until @current
++ * is ready to check for signals and return to user mode. If there
++ * are more callbacks to be made, the last round of calls determines
++ * the final action. A @report_quiesce callback with @event zero, or
++ * a @report_signal callback, will always be the last one made before
++ * @current resumes. Only %UTRACE_STOP is "sticky"--if @engine returned
++ * %UTRACE_STOP then @current stays stopped unless @engine returns
++ * a different resume action from a following callback.
++ *
++ * The report_death() and report_reap() callbacks do not take @action
++ * arguments, and only %UTRACE_DETACH is meaningful in the return value
++ * from a report_death() callback. None of the resume actions applies
++ * to a dead thread.
++ *
++ * All @report_*() hooks are called with no locks held, in a generally
++ * safe environment when we will be returning to user mode soon (or just
++ * entered the kernel). It is fine to block for memory allocation and
++ * the like, but all hooks are asynchronous and must not block on
++ * external events! If you want the thread to block, use %UTRACE_STOP
++ * in your hook's return value; then later wake it up with utrace_control().
++ *
++ * @report_quiesce:
++ * Requested by %UTRACE_EVENT(%QUIESCE).
++ * This does not indicate any event, but just that @current is in a
++ * safe place for examination. This call is made before each specific
++ * event callback, except for @report_reap. The @event argument gives
++ * the %UTRACE_EVENT(@which) value for the event occurring. This
++ * callback might be made for events @engine has not requested, if
++ * some other engine is tracing the event; a utrace_set_events()
++ * call here can request the immediate callback for this occurrence of
++ * @event. @event is zero when there is no other event, @current is
++ * now ready to check for signals and return to user mode, and some
++ * engine has used %UTRACE_REPORT or %UTRACE_INTERRUPT to request this
++ * callback. For this case, if @report_signal is not %NULL, the
++ * @report_quiesce callback may be replaced with a @report_signal
++ * callback passing %UTRACE_SIGNAL_REPORT in its @action argument,
++ * whenever @current is entering the signal-check path anyway.
++ *
++ * @report_signal:
++ * Requested by %UTRACE_EVENT(%SIGNAL_*) or %UTRACE_EVENT(%QUIESCE).
++ * Use utrace_signal_action() and utrace_resume_action() on @action.
++ * The signal action is %UTRACE_SIGNAL_REPORT when some engine has
++ * used %UTRACE_REPORT or %UTRACE_INTERRUPT; the callback can choose
++ * to stop or to deliver an artificial signal, before pending signals.
++ * It's %UTRACE_SIGNAL_HANDLER instead when signal handler setup just
++ * finished (after a previous %UTRACE_SIGNAL_DELIVER return); this
++ * serves in lieu of any %UTRACE_SIGNAL_REPORT callback requested by
++ * %UTRACE_REPORT or %UTRACE_INTERRUPT, and is also implicitly
++ * requested by %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP into the
++ * signal delivery. The other signal actions indicate a signal about
++ * to be delivered; the previous engine's return value sets the signal
++ * action seen by the following engine's callback. The @info data
++ * can be changed at will, including @info->si_signo. The settings in
++ * @return_ka determine what %UTRACE_SIGNAL_DELIVER does. @orig_ka
++ * is what was in force before other tracing engines intervened, and
++ * it's %NULL when this report began as %UTRACE_SIGNAL_REPORT or
++ * %UTRACE_SIGNAL_HANDLER. For a report without a new signal, @info
++ * is left uninitialized and must be set completely by an engine that
++ * chooses to deliver a signal; if there was a previous @report_signal
++ * callback ending in %UTRACE_STOP and it was just resumed using
++ * %UTRACE_REPORT or %UTRACE_INTERRUPT, then @info is left unchanged
++ * from the previous callback. In this way, the original signal can
++ * be left in @info while returning %UTRACE_STOP|%UTRACE_SIGNAL_IGN
++ * and then found again when resuming with %UTRACE_INTERRUPT.
++ * The %UTRACE_SIGNAL_HOLD flag bit can be OR'd into the return value,
++ * and might be in @action if the previous engine returned it. This
++ * flag asks that the signal in @info be pushed back on @current's queue
++ * so that it will be seen again after whatever action is taken now.
++ *
++ * @report_clone:
++ * Requested by %UTRACE_EVENT(%CLONE).
++ * Event reported for parent, before the new task @child might run.
++ * @clone_flags gives the flags used in the clone system call, or
++ * equivalent flags for a fork() or vfork() system call. This
++ * function can use utrace_attach_task() on @child. Then passing
++ * %UTRACE_STOP to utrace_control() on @child here keeps the child
++ * stopped before it ever runs in user mode, %UTRACE_REPORT or
++ * %UTRACE_INTERRUPT ensures a callback from @child before it
++ * starts in user mode.
++ *
++ * @report_jctl:
++ * Requested by %UTRACE_EVENT(%JCTL).
++ * Job control event; @type is %CLD_STOPPED or %CLD_CONTINUED,
++ * indicating whether we are stopping or resuming now. If @notify
++ * is nonzero, @current is the last thread to stop and so will send
++ * %SIGCHLD to its parent after this callback; @notify reflects
++ * what the parent's %SIGCHLD has in @si_code, which can sometimes
++ * be %CLD_STOPPED even when @type is %CLD_CONTINUED.
++ *
++ * @report_exec:
++ * Requested by %UTRACE_EVENT(%EXEC).
++ * An execve system call has succeeded and the new program is about to
++ * start running. The initial user register state is handy to be tweaked
++ * directly in @regs. @fmt and @bprm give the details of this exec.
++ *
++ * @report_syscall_entry:
++ * Requested by %UTRACE_EVENT(%SYSCALL_ENTRY).
++ * Thread has entered the kernel to request a system call.
++ * The user register state is handy to be tweaked directly in @regs.
++ * The @action argument contains an &enum utrace_syscall_action,
++ * use utrace_syscall_action() to extract it. The return value
++ * overrides the last engine's action for the system call.
++ * If the final action is %UTRACE_SYSCALL_ABORT, no system call
++ * is made. The details of the system call being attempted can
++ * be fetched here with syscall_get_nr() and syscall_get_arguments().
++ * The parameter registers can be changed with syscall_set_arguments().
++ * See above about the %UTRACE_SYSCALL_RESUMED flag in @action.
++ * Use %UTRACE_REPORT in the return value to guarantee you get
++ * another callback (with %UTRACE_SYSCALL_RESUMED flag) in case
++ * @current stops with %UTRACE_STOP before attempting the system call.
++ *
++ * @report_syscall_exit:
++ * Requested by %UTRACE_EVENT(%SYSCALL_EXIT).
++ * Thread is about to leave the kernel after a system call request.
++ * The user register state is handy to be tweaked directly in @regs.
++ * The results of the system call attempt can be examined here using
++ * syscall_get_error() and syscall_get_return_value(). It is safe
++ * here to call syscall_set_return_value() or syscall_rollback().
++ *
++ * @report_exit:
++ * Requested by %UTRACE_EVENT(%EXIT).
++ * Thread is exiting and cannot be prevented from doing so,
++ * but all its state is still live. The @code value will be
++ * the wait result seen by the parent, and can be changed by
++ * this engine or others. The @orig_code value is the real
++ * status, not changed by any tracing engine. Returning %UTRACE_STOP
++ * here keeps @current stopped before it cleans up its state and dies,
++ * so it can be examined by other processes. When @current is allowed
++ * to run, it will die and get to the @report_death callback.
++ *
++ * @report_death:
++ * Requested by %UTRACE_EVENT(%DEATH).
++ * Thread is really dead now. It might be reaped by its parent at
++ * any time, or self-reap immediately. Though the actual reaping
++ * may happen in parallel, a report_reap() callback will always be
++ * ordered after a report_death() callback.
++ *
++ * @report_reap:
++ * Requested by %UTRACE_EVENT(%REAP).
++ * Called when someone reaps the dead task (parent, init, or self).
++ * This means the parent called wait, or else this was a detached
++ * thread or a process whose parent ignores SIGCHLD.
++ * No more callbacks are made after this one.
++ * The engine is always detached.
++ * There is nothing more a tracing engine can do about this thread.
++ * After this callback, the @engine pointer will become invalid.
++ * The @task pointer may become invalid if get_task_struct() hasn't
++ * been used to keep it alive.
++ * An engine should always request this callback if it stores the
++ * @engine pointer or stores any pointer in @engine->data, so it
++ * can clean up its data structures.
++ * Unlike other callbacks, this can be called from the parent's context
++ * rather than from the traced thread itself--it must not delay the
++ * parent by blocking.
++ *
++ * @release:
++ * If not %NULL, this is called after the last utrace_engine_put()
++ * call for a &struct utrace_engine, which could be implicit after
++ * a %UTRACE_DETACH return from another callback. Its argument is
++ * the engine's @data member.
++ */
++struct utrace_engine_ops {
++ u32 (*report_quiesce)(u32 action, struct utrace_engine *engine,
++ unsigned long event);
++ u32 (*report_signal)(u32 action, struct utrace_engine *engine,
++ struct pt_regs *regs,
++ siginfo_t *info,
++ const struct k_sigaction *orig_ka,
++ struct k_sigaction *return_ka);
++ u32 (*report_clone)(u32 action, struct utrace_engine *engine,
++ unsigned long clone_flags,
++ struct task_struct *child);
++ u32 (*report_jctl)(u32 action, struct utrace_engine *engine,
++ int type, int notify);
++ u32 (*report_exec)(u32 action, struct utrace_engine *engine,
++ const struct linux_binfmt *fmt,
++ const struct linux_binprm *bprm,
++ struct pt_regs *regs);
++ u32 (*report_syscall_entry)(u32 action, struct utrace_engine *engine,
++ struct pt_regs *regs);
++ u32 (*report_syscall_exit)(u32 action, struct utrace_engine *engine,
++ struct pt_regs *regs);
++ u32 (*report_exit)(u32 action, struct utrace_engine *engine,
++ long orig_code, long *code);
++ u32 (*report_death)(struct utrace_engine *engine,
++ bool group_dead, int signal);
++ void (*report_reap)(struct utrace_engine *engine,
++ struct task_struct *task);
++ void (*release)(void *data);
++};
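++
++/*
++ * Illustrative sketch (not part of this interface; all example_* names
++ * are hypothetical): a small callback table.  An engine only needs to
++ * supply callbacks for the events it enables with utrace_set_events().
++ * The signal callback below keeps whatever disposition was already
++ * chosen but single-steps into any user handler that gets set up; the
++ * syscall-entry callback simply preserves the actions chosen by any
++ * previous engines.
++ */
++static inline u32 example_report_signal(u32 action,
++					struct utrace_engine *engine,
++					struct pt_regs *regs, siginfo_t *info,
++					const struct k_sigaction *orig_ka,
++					struct k_sigaction *return_ka)
++{
++	if (utrace_signal_action(action) == UTRACE_SIGNAL_HANDLER)
++		return UTRACE_RESUME;	/* handler frame is set up */
++	return utrace_signal_action(action) | UTRACE_SINGLESTEP;
++}
++
++static inline u32 example_report_syscall_entry(u32 action,
++					       struct utrace_engine *engine,
++					       struct pt_regs *regs)
++{
++	return utrace_syscall_action(action) | utrace_resume_action(action);
++}
++
++static const struct utrace_engine_ops example_engine_ops = {
++	.report_signal		= example_report_signal,
++	.report_syscall_entry	= example_report_syscall_entry,
++};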
++
++/**
++ * struct utrace_examiner - private state for using utrace_prepare_examine()
++ *
++ * The members of &struct utrace_examiner are private to the implementation.
++ * This data type holds the state from a call to utrace_prepare_examine()
++ * to be used by a call to utrace_finish_examine().
++ */
++struct utrace_examiner {
++/* private: */
++ long state;
++ unsigned long ncsw;
++};
++
++/*
++ * These are the exported entry points for tracing engines to use.
++ * See kernel/utrace.c for their kerneldoc comments with interface details.
++ */
++struct utrace_engine *utrace_attach_task(struct task_struct *, int,
++ const struct utrace_engine_ops *,
++ void *);
++struct utrace_engine *utrace_attach_pid(struct pid *, int,
++ const struct utrace_engine_ops *,
++ void *);
++int __must_check utrace_control(struct task_struct *,
++ struct utrace_engine *,
++ enum utrace_resume_action);
++int __must_check utrace_set_events(struct task_struct *,
++ struct utrace_engine *,
++ unsigned long eventmask);
++int __must_check utrace_barrier(struct task_struct *,
++ struct utrace_engine *);
++int __must_check utrace_prepare_examine(struct task_struct *,
++ struct utrace_engine *,
++ struct utrace_examiner *);
++int __must_check utrace_finish_examine(struct task_struct *,
++ struct utrace_engine *,
++ struct utrace_examiner *);
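++
++/*
++ * Illustrative sketch (not part of this interface; the example_* name is
++ * hypothetical): shut off an engine's callbacks and synchronize with any
++ * report that may still be in progress.  utrace_set_events() can return
++ * -EINPROGRESS in that case; see its kerneldoc in kernel/utrace.c.
++ */
++static inline int example_quiesce_engine(struct task_struct *task,
++					 struct utrace_engine *engine)
++{
++	int ret = utrace_set_events(task, engine, 0);
++	if (ret == -EINPROGRESS)
++		ret = utrace_barrier(task, engine);
++	return ret;
++}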
++
++/**
++ * utrace_control_pid - control a thread being traced by a tracing engine
++ * @pid: thread to affect
++ * @engine: attached engine to affect
++ * @action: &enum utrace_resume_action for thread to do
++ *
++ * This is the same as utrace_control(), but takes a &struct pid
++ * pointer rather than a &struct task_struct pointer. The caller must
++ * hold a ref on @pid, but does not need to worry about the task
++ * staying valid. If it's been reaped so that @pid points nowhere,
++ * then this call returns -%ESRCH.
++ */
++static inline __must_check int utrace_control_pid(
++ struct pid *pid, struct utrace_engine *engine,
++ enum utrace_resume_action action)
++{
++ /*
++ * We don't bother with rcu_read_lock() here to protect the
++ * task_struct pointer, because utrace_control will return
++ * -ESRCH without looking at that pointer if the engine is
++ * already detached. A task_struct pointer can't die before
++ * all the engines are detached in release_task() first.
++ */
++ struct task_struct *task = pid_task(pid, PIDTYPE_PID);
++ return unlikely(!task) ? -ESRCH : utrace_control(task, engine, action);
++}
++
++/**
++ * utrace_set_events_pid - choose which event reports a tracing engine gets
++ * @pid: thread to affect
++ * @engine: attached engine to affect
++ * @eventmask: new event mask
++ *
++ * This is the same as utrace_set_events(), but takes a &struct pid
++ * pointer rather than a &struct task_struct pointer. The caller must
++ * hold a ref on @pid, but does not need to worry about the task
++ * staying valid. If it's been reaped so that @pid points nowhere,
++ * then this call returns -%ESRCH.
++ */
++static inline __must_check int utrace_set_events_pid(
++ struct pid *pid, struct utrace_engine *engine, unsigned long eventmask)
++{
++ struct task_struct *task = pid_task(pid, PIDTYPE_PID);
++ return unlikely(!task) ? -ESRCH :
++ utrace_set_events(task, engine, eventmask);
++}
++
++/**
++ * utrace_barrier_pid - synchronize with simultaneous tracing callbacks
++ * @pid: thread to affect
++ * @engine: engine to affect (can be detached)
++ *
++ * This is the same as utrace_barrier(), but takes a &struct pid
++ * pointer rather than a &struct task_struct pointer. The caller must
++ * hold a ref on @pid, but does not need to worry about the task
++ * staying valid. If it's been reaped so that @pid points nowhere,
++ * then this call returns -%ESRCH.
++ */
++static inline __must_check int utrace_barrier_pid(struct pid *pid,
++ struct utrace_engine *engine)
++{
++ struct task_struct *task = pid_task(pid, PIDTYPE_PID);
++ return unlikely(!task) ? -ESRCH : utrace_barrier(task, engine);
++}
++
++#endif /* CONFIG_UTRACE */
++
++#endif /* linux/utrace.h */
+diff --git a/init/Kconfig b/init/Kconfig
+index eb4b337..140e636 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -310,6 +310,15 @@ config AUDIT_TREE
+ depends on AUDITSYSCALL
+ select INOTIFY
+
++config UTRACE
++ bool "Infrastructure for tracing and debugging user processes"
++ depends on EXPERIMENTAL
++ depends on HAVE_ARCH_TRACEHOOK
++ help
++ Enable the utrace process tracing interface. This is an internal
++ kernel interface exported to kernel modules, to track events in
++	  user threads, and to extract and change user thread state.
++
+ menu "RCU Subsystem"
+
+ choice
+diff --git a/kernel/Makefile b/kernel/Makefile
+index d7c13d2..263bb19 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -68,6 +68,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o
+ obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
+ obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
+ obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
++obj-$(CONFIG_UTRACE) += utrace.o
+ obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
+ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
+ obj-$(CONFIG_GCOV_KERNEL) += gcov/
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 166b8c4..3ac952e 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -152,6 +152,7 @@ void free_task(struct task_struct *tsk)
+ free_thread_info(tsk->stack);
+ rt_mutex_debug_task_free(tsk);
+ ftrace_graph_exit_task(tsk);
++ tracehook_free_task(tsk);
+ free_task_struct(tsk);
+ }
+ EXPORT_SYMBOL(free_task);
+@@ -1018,6 +1019,8 @@ static struct task_struct *copy_process(
+ if (!p)
+ goto fork_out;
+
++ tracehook_init_task(p);
++
+ ftrace_graph_init_task(p);
+
+ rt_mutex_init_task(p);
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index b7c1d32..a408bf7 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -16,6 +16,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/smp_lock.h>
+ #include <linux/ptrace.h>
++#include <linux/utrace.h>
+ #include <linux/security.h>
+ #include <linux/signal.h>
+ #include <linux/audit.h>
+@@ -164,6 +165,14 @@ bool ptrace_may_access(struct task_struc
+ return !err;
+ }
+
++/*
++ * For experimental use of utrace, exclude ptrace on the same task.
++ */
++static inline bool exclude_ptrace(struct task_struct *task)
++{
++ return unlikely(!!task_utrace_flags(task));
++}
++
+ int ptrace_attach(struct task_struct *task)
+ {
+ int retval;
+@@ -187,6 +196,8 @@ int ptrace_attach(struct task_struct *ta
+
+ task_lock(task);
+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
++ if (!retval && exclude_ptrace(task))
++ retval = -EBUSY;
+ task_unlock(task);
+ if (retval)
+ goto unlock_creds;
+@@ -224,6 +235,9 @@ int ptrace_traceme(void)
+ {
+ int ret = -EPERM;
+
++ if (exclude_ptrace(current)) /* XXX locking */
++ return -EBUSY;
++
+ write_lock_irq(&tasklist_lock);
+ /* Are we already being traced? */
+ if (!current->ptrace) {
+diff --git a/kernel/utrace.c b/kernel/utrace.c
+new file mode 100644
+index ...f003e34 100644
+--- /dev/null
++++ b/kernel/utrace.c
+@@ -0,0 +1,2436 @@
++/*
++ * utrace infrastructure interface for debugging user processes
++ *
++ * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
++ *
++ * This copyrighted material is made available to anyone wishing to use,
++ * modify, copy, or redistribute it subject to the terms and conditions
++ * of the GNU General Public License v.2.
++ *
++ * Red Hat Author: Roland McGrath.
++ */
++
++#include <linux/utrace.h>
++#include <linux/tracehook.h>
++#include <linux/regset.h>
++#include <asm/syscall.h>
++#include <linux/ptrace.h>
++#include <linux/err.h>
++#include <linux/sched.h>
++#include <linux/freezer.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/seq_file.h>
++
++
++/*
++ * Per-thread structure private to utrace implementation.
++ * If task_struct.utrace_flags is nonzero, task_struct.utrace
++ * has always been allocated first. Once allocated, it is
++ * never freed until free_task().
++ *
++ * The common event reporting loops are done by the task making the
++ * report without ever taking any locks. To facilitate this, the two
++ * lists @attached and @attaching work together for smooth asynchronous
++ * attaching with low overhead. Modifying either list requires @lock.
++ * The @attaching list can be modified any time while holding @lock.
++ * New engines being attached always go on this list.
++ *
++ * The @attached list is what the task itself uses for its reporting
++ * loops. When the task itself is not quiescent, it can use the
++ * @attached list without taking any lock. Nobody may modify the list
++ * when the task is not quiescent. When it is quiescent, that means
++ * that it won't run again without taking @lock itself before using
++ * the list.
++ *
++ * At each place where we know the task is quiescent (or it's current),
++ * while holding @lock, we call splice_attaching(), below. This moves
++ * the @attaching list members on to the end of the @attached list.
++ * Since this happens at the start of any reporting pass, any new
++ * engines attached asynchronously go on the stable @attached list
++ * in time to have their callbacks seen.
++ */
++struct utrace {
++ spinlock_t lock;
++ struct list_head attached, attaching;
++
++ struct task_struct *cloning;
++
++ struct utrace_engine *reporting;
++
++ enum utrace_resume_action resume:UTRACE_RESUME_BITS;
++ unsigned int signal_handler:1;
++ unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */
++ unsigned int death:1; /* in utrace_report_death() now */
++ unsigned int reap:1; /* release_task() has run */
++ unsigned int pending_attach:1; /* need splice_attaching() */
++};
++
++static struct kmem_cache *utrace_cachep;
++static struct kmem_cache *utrace_engine_cachep;
++static const struct utrace_engine_ops utrace_detached_ops; /* forward decl */
++
++static int __init utrace_init(void)
++{
++ utrace_cachep = KMEM_CACHE(utrace, SLAB_PANIC);
++ utrace_engine_cachep = KMEM_CACHE(utrace_engine, SLAB_PANIC);
++ return 0;
++}
++module_init(utrace_init);
++
++/*
++ * Set up @task.utrace for the first time. We can have races
++ * between two utrace_attach_task() calls here. The task_lock()
++ * governs installing the new pointer. If another one got in first,
++ * we just punt the new one we allocated.
++ *
++ * This returns false only in case of a memory allocation failure.
++ */
++static bool utrace_task_alloc(struct task_struct *task)
++{
++ struct utrace *utrace = kmem_cache_zalloc(utrace_cachep, GFP_KERNEL);
++ if (unlikely(!utrace))
++ return false;
++ spin_lock_init(&utrace->lock);
++ INIT_LIST_HEAD(&utrace->attached);
++ INIT_LIST_HEAD(&utrace->attaching);
++ utrace->resume = UTRACE_RESUME;
++ task_lock(task);
++ if (likely(!task->utrace)) {
++ /*
++ * This barrier makes sure the initialization of the struct
++ * precedes the installation of the pointer. This pairs
++ * with smp_read_barrier_depends() in task_utrace_struct().
++ */
++ smp_wmb();
++ task->utrace = utrace;
++ }
++ task_unlock(task);
++
++ if (unlikely(task->utrace != utrace))
++ kmem_cache_free(utrace_cachep, utrace);
++ return true;
++}
++
++/*
++ * This is called via tracehook_free_task() from free_task()
++ * when @task is being deallocated.
++ */
++void utrace_free_task(struct task_struct *task)
++{
++ kmem_cache_free(utrace_cachep, task->utrace);
++}
++
++/*
++ * This is called when the task is safely quiescent, i.e. it won't consult
++ * utrace->attached without the lock. Move any engines attached
++ * asynchronously from @utrace->attaching onto the @utrace->attached list.
++ */
++static void splice_attaching(struct utrace *utrace)
++{
++ lockdep_assert_held(&utrace->lock);
++ list_splice_tail_init(&utrace->attaching, &utrace->attached);
++ utrace->pending_attach = 0;
++}
++
++/*
++ * This is the exported function used by the utrace_engine_put() inline.
++ */
++void __utrace_engine_release(struct kref *kref)
++{
++ struct utrace_engine *engine = container_of(kref, struct utrace_engine,
++ kref);
++ BUG_ON(!list_empty(&engine->entry));
++ if (engine->release)
++ (*engine->release)(engine->data);
++ kmem_cache_free(utrace_engine_cachep, engine);
++}
++EXPORT_SYMBOL_GPL(__utrace_engine_release);
++
++static bool engine_matches(struct utrace_engine *engine, int flags,
++ const struct utrace_engine_ops *ops, void *data)
++{
++ if ((flags & UTRACE_ATTACH_MATCH_OPS) && engine->ops != ops)
++ return false;
++ if ((flags & UTRACE_ATTACH_MATCH_DATA) && engine->data != data)
++ return false;
++ return engine->ops && engine->ops != &utrace_detached_ops;
++}
++
++static struct utrace_engine *find_matching_engine(
++ struct utrace *utrace, int flags,
++ const struct utrace_engine_ops *ops, void *data)
++{
++ struct utrace_engine *engine;
++ list_for_each_entry(engine, &utrace->attached, entry)
++ if (engine_matches(engine, flags, ops, data))
++ return engine;
++ list_for_each_entry(engine, &utrace->attaching, entry)
++ if (engine_matches(engine, flags, ops, data))
++ return engine;
++ return NULL;
++}
++
++/*
++ * Enqueue @engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE.
++ */
++static int utrace_add_engine(struct task_struct *target,
++ struct utrace *utrace,
++ struct utrace_engine *engine,
++ int flags,
++ const struct utrace_engine_ops *ops,
++ void *data)
++{
++ int ret;
++
++ spin_lock(&utrace->lock);
++
++ ret = -EEXIST;
++ if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
++ unlikely(find_matching_engine(utrace, flags, ops, data)))
++ goto unlock;
++
++ /*
++ * In case we had no engines before, make sure that
++ * utrace_flags is not zero. Since we did unlock+lock
++ * at least once after utrace_task_alloc() installed
++ * ->utrace, we have the necessary barrier which pairs
++ * with rmb() in task_utrace_struct().
++ */
++ ret = -ESRCH;
++ if (!target->utrace_flags) {
++ target->utrace_flags = UTRACE_EVENT(REAP);
++ /*
++ * If we race with tracehook_prepare_release_task()
++ * make sure that either it sees utrace_flags != 0
++ * or we see exit_state == EXIT_DEAD.
++ */
++ smp_mb();
++ if (unlikely(target->exit_state == EXIT_DEAD)) {
++ target->utrace_flags = 0;
++ goto unlock;
++ }
++ }
++
++ /*
++ * Put the new engine on the pending ->attaching list.
++ * Make sure it gets onto the ->attached list by the next
++ * time it's examined. Setting ->pending_attach ensures
++ * that start_report() takes the lock and splices the lists
++ * before the next new reporting pass.
++ *
++ * When target == current, it would be safe just to call
++ * splice_attaching() right here. But if we're inside a
++ * callback, that would mean the new engine also gets
++ * notified about the event that precipitated its own
++ * creation. This is not what the user wants.
++ */
++ list_add_tail(&engine->entry, &utrace->attaching);
++ utrace->pending_attach = 1;
++ ret = 0;
++unlock:
++ spin_unlock(&utrace->lock);
++
++ return ret;
++}
++
++/**
++ * utrace_attach_task - attach new engine, or look up an attached engine
++ * @target: thread to attach to
++ * @flags: flag bits combined with OR, see below
++ * @ops: callback table for new engine
++ * @data: engine private data pointer
++ *
++ * The caller must ensure that the @target thread does not get freed,
++ * i.e. hold a ref or be its parent. It is always safe to call this
++ * on @current, or on the @child pointer in a @report_clone callback.
++ * For most other cases, it's easier to use utrace_attach_pid() instead.
++ *
++ * UTRACE_ATTACH_CREATE:
++ * Create a new engine. If %UTRACE_ATTACH_CREATE is not specified, you
++ * only look up an existing engine already attached to the thread.
++ *
++ * UTRACE_ATTACH_EXCLUSIVE:
++ * Attempting to attach a second (matching) engine fails with -%EEXIST.
++ *
++ * UTRACE_ATTACH_MATCH_OPS: Only consider engines matching @ops.
++ * UTRACE_ATTACH_MATCH_DATA: Only consider engines matching @data.
++ *
++ * Calls with neither %UTRACE_ATTACH_MATCH_OPS nor %UTRACE_ATTACH_MATCH_DATA
++ * match the first among any engines attached to @target. That means that
++ * %UTRACE_ATTACH_EXCLUSIVE in such a call fails with -%EEXIST if there
++ * are any engines on @target at all.
++ */
++struct utrace_engine *utrace_attach_task(
++ struct task_struct *target, int flags,
++ const struct utrace_engine_ops *ops, void *data)
++{
++ struct utrace *utrace = task_utrace_struct(target);
++ struct utrace_engine *engine;
++ int ret;
++
++ if (!(flags & UTRACE_ATTACH_CREATE)) {
++ if (unlikely(!utrace))
++ return ERR_PTR(-ENOENT);
++ spin_lock(&utrace->lock);
++ engine = find_matching_engine(utrace, flags, ops, data);
++ if (engine)
++ utrace_engine_get(engine);
++ spin_unlock(&utrace->lock);
++ return engine ?: ERR_PTR(-ENOENT);
++ }
++
++ if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops))
++ return ERR_PTR(-EINVAL);
++
++ if (unlikely(target->flags & PF_KTHREAD))
++ /*
++ * Silly kernel, utrace is for users!
++ */
++ return ERR_PTR(-EPERM);
++
++ if (!utrace) {
++ if (unlikely(!utrace_task_alloc(target)))
++ return ERR_PTR(-ENOMEM);
++ utrace = task_utrace_struct(target);
++ }
++
++ engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
++ if (unlikely(!engine))
++ return ERR_PTR(-ENOMEM);
++
++ /*
++ * Initialize the new engine structure. It starts out with two
++ * refs: one ref to return, and one ref for being attached.
++ */
++ kref_set(&engine->kref, 2);
++ engine->flags = 0;
++ engine->ops = ops;
++ engine->data = data;
++ engine->release = ops->release;
++
++ ret = utrace_add_engine(target, utrace, engine, flags, ops, data);
++
++ if (unlikely(ret)) {
++ kmem_cache_free(utrace_engine_cachep, engine);
++ engine = ERR_PTR(ret);
++ }
++
++ return engine;
++}
++EXPORT_SYMBOL_GPL(utrace_attach_task);
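++
++/*
++ * Illustrative usage sketch (not part of this file; the example_* names
++ * are hypothetical): attach a new engine to @task and ask for CLONE
++ * reports.  The attachment itself holds one implicit reference, so the
++ * reference returned by utrace_attach_task() can be dropped right away.
++ */
++static u32 example_report_clone(u32 action, struct utrace_engine *engine,
++				unsigned long clone_flags,
++				struct task_struct *child)
++{
++	pr_info("%d cloned child %d\n", current->pid, child->pid);
++	return UTRACE_RESUME;
++}
++
++static const struct utrace_engine_ops example_clone_ops = {
++	.report_clone = example_report_clone,
++};
++
++static int __maybe_unused example_attach(struct task_struct *task)
++{
++	struct utrace_engine *engine;
++	int ret;
++
++	engine = utrace_attach_task(task, UTRACE_ATTACH_CREATE,
++				    &example_clone_ops, NULL);
++	if (IS_ERR(engine))
++		return PTR_ERR(engine);
++
++	ret = utrace_set_events(task, engine, UTRACE_EVENT(CLONE));
++	utrace_engine_put(engine);
++	return ret;
++}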
++
++/**
++ * utrace_attach_pid - attach new engine, or look up an attached engine
++ * @pid: &struct pid pointer representing thread to attach to
++ * @flags: flag bits combined with OR, see utrace_attach_task()
++ * @ops: callback table for new engine
++ * @data: engine private data pointer
++ *
++ * This is the same as utrace_attach_task(), but takes a &struct pid
++ * pointer rather than a &struct task_struct pointer. The caller must
++ * hold a ref on @pid, but does not need to worry about the task
++ * staying valid. If it's been reaped so that @pid points nowhere,
++ * then this call returns -%ESRCH.
++ */
++struct utrace_engine *utrace_attach_pid(
++ struct pid *pid, int flags,
++ const struct utrace_engine_ops *ops, void *data)
++{
++ struct utrace_engine *engine = ERR_PTR(-ESRCH);
++ struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
++ if (task) {
++ engine = utrace_attach_task(task, flags, ops, data);
++ put_task_struct(task);
++ }
++ return engine;
++}
++EXPORT_SYMBOL_GPL(utrace_attach_pid);
++
++/*
++ * When an engine is detached, the target thread may still see it and
++ * make callbacks until it quiesces. We install a special ops vector
++ * with these two callbacks. When the target thread quiesces, it can
++ * safely free the engine itself. For any event we will always get
++ * the report_quiesce() callback first, so we only need this one
++ * pointer to be set. The only exception is report_reap(), so we
++ * supply that callback too.
++ */
++static u32 utrace_detached_quiesce(u32 action, struct utrace_engine *engine,
++ unsigned long event)
++{
++ return UTRACE_DETACH;
++}
++
++static void utrace_detached_reap(struct utrace_engine *engine,
++ struct task_struct *task)
++{
++}
++
++static const struct utrace_engine_ops utrace_detached_ops = {
++ .report_quiesce = &utrace_detached_quiesce,
++ .report_reap = &utrace_detached_reap
++};
++
++/*
++ * The caller has to hold a ref on the engine. If the attached flag is
++ * true (all but utrace_barrier() calls), the engine is supposed to be
++ * attached. If the attached flag is false (utrace_barrier() only),
++ * then return -ERESTARTSYS for an engine marked for detach but not yet
++ * fully detached. The task pointer can be invalid if the engine is
++ * detached.
++ *
++ * Get the utrace lock for the target task.
++ * Returns the struct if locked, or ERR_PTR(-errno).
++ *
++ * This has to be robust against races with:
++ * utrace_control(target, UTRACE_DETACH) calls
++ * UTRACE_DETACH after reports
++ * utrace_report_death
++ * utrace_release_task
++ */
++static struct utrace *get_utrace_lock(struct task_struct *target,
++ struct utrace_engine *engine,
++ bool attached)
++ __acquires(utrace->lock)
++{
++ struct utrace *utrace;
++
++ rcu_read_lock();
++
++ /*
++ * If this engine was already detached, bail out before we look at
++ * the task_struct pointer at all. If it's detached after this
++ * check, then RCU is still keeping this task_struct pointer valid.
++ *
++ * The ops pointer is NULL when the engine is fully detached.
++ * It's &utrace_detached_ops when it's marked detached but still
++ * on the list. In the latter case, utrace_barrier() still works,
++ * since the target might be in the middle of an old callback.
++ */
++ if (unlikely(!engine->ops)) {
++ rcu_read_unlock();
++ return ERR_PTR(-ESRCH);
++ }
++
++ if (unlikely(engine->ops == &utrace_detached_ops)) {
++ rcu_read_unlock();
++ return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS);
++ }
++
++ utrace = task_utrace_struct(target);
++ spin_lock(&utrace->lock);
++ if (unlikely(!engine->ops) ||
++ unlikely(engine->ops == &utrace_detached_ops)) {
++ /*
++ * By the time we got the utrace lock,
++ * it had been reaped or detached already.
++ */
++ spin_unlock(&utrace->lock);
++ utrace = ERR_PTR(-ESRCH);
++ if (!attached && engine->ops == &utrace_detached_ops)
++ utrace = ERR_PTR(-ERESTARTSYS);
++ }
++ rcu_read_unlock();
++
++ return utrace;
++}
++
++/*
++ * Now that we don't hold any locks, run through any
++ * detached engines and free their references. Each
++ * engine had one implicit ref while it was attached.
++ */
++static void put_detached_list(struct list_head *list)
++{
++ struct utrace_engine *engine, *next;
++ list_for_each_entry_safe(engine, next, list, entry) {
++ list_del_init(&engine->entry);
++ utrace_engine_put(engine);
++ }
++}
++
++/*
++ * We use an extra bit in utrace_engine.flags past the event bits,
++ * to record whether the engine is keeping the target thread stopped.
++ *
++ * This bit is set in task_struct.utrace_flags whenever it is set in any
++ * engine's flags. Only utrace_reset() resets it in utrace_flags.
++ */
++#define ENGINE_STOP (1UL << _UTRACE_NEVENTS)
++
++static void mark_engine_wants_stop(struct task_struct *task,
++ struct utrace_engine *engine)
++{
++ engine->flags |= ENGINE_STOP;
++ task->utrace_flags |= ENGINE_STOP;
++}
++
++static void clear_engine_wants_stop(struct utrace_engine *engine)
++{
++ engine->flags &= ~ENGINE_STOP;
++}
++
++static bool engine_wants_stop(struct utrace_engine *engine)
++{
++ return (engine->flags & ENGINE_STOP) != 0;
++}
++
++/**
++ * utrace_set_events - choose which event reports a tracing engine gets
++ * @target: thread to affect
++ * @engine: attached engine to affect
++ * @events: new event mask
++ *
++ * This changes the set of events for which @engine wants callbacks made.
++ *
++ * This fails with -%EALREADY and does nothing if you try to clear
++ * %UTRACE_EVENT(%DEATH) when the @report_death callback may already have
++ * begun, if you try to clear %UTRACE_EVENT(%REAP) when the @report_reap
++ * callback may already have begun, or if you try to newly set
++ * %UTRACE_EVENT(%DEATH) or %UTRACE_EVENT(%QUIESCE) when @target is
++ * already dead or dying.
++ *
++ * This can fail with -%ESRCH when @target has already been detached,
++ * including forcible detach on reaping.
++ *
++ * If @target was stopped before the call, then after a successful call,
++ * no event callbacks not requested in @events will be made; if
++ * %UTRACE_EVENT(%QUIESCE) is included in @events, then a
++ * @report_quiesce callback will be made when @target resumes.
++ *
++ * If @target was not stopped and @events excludes some bits that were
++ * set before, this can return -%EINPROGRESS to indicate that @target
++ * may have been making some callback to @engine. When this returns
++ * zero, you can be sure that no event callbacks you've disabled in
++ * @events can be made. If @events only sets new bits that were not set
++ * before on @engine, then -%EINPROGRESS will never be returned.
++ *
++ * To synchronize after an -%EINPROGRESS return, see utrace_barrier().
++ *
++ * When @target is @current, -%EINPROGRESS is not returned. But note
++ * that a newly-created engine will not receive any callbacks related to
++ * an event notification already in progress. This call enables @events
++ * callbacks to be made as soon as @engine becomes eligible for any
++ * callbacks, see utrace_attach_task().
++ *
++ * These rules provide for coherent synchronization based on %UTRACE_STOP,
++ * even when %SIGKILL is breaking its normal simple rules.
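++ *
++ * A minimal sketch of typical use (the engine name @my_engine is
++ * hypothetical; it must already be attached via utrace_attach_task()):
++ *
++ *	ret = utrace_set_events(task, my_engine,
++ *				UTRACE_EVENT(QUIESCE) |
++ *				UTRACE_EVENT(SYSCALL_ENTRY));
++ *	if (ret == -EINPROGRESS)
++ *		ret = utrace_barrier(task, my_engine);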
++ */
++int utrace_set_events(struct task_struct *target,
++ struct utrace_engine *engine,
++ unsigned long events)
++{
++ struct utrace *utrace;
++ unsigned long old_flags, old_utrace_flags;
++ int ret;
++
++ /*
++ * We just ignore the internal bit, so callers can use
++ * engine->flags to seed bitwise ops for our argument.
++ */
++ events &= ~ENGINE_STOP;
++
++ utrace = get_utrace_lock(target, engine, true);
++ if (unlikely(IS_ERR(utrace)))
++ return PTR_ERR(utrace);
++
++ old_utrace_flags = target->utrace_flags;
++ old_flags = engine->flags & ~ENGINE_STOP;
++
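++ /*
++ * Refuse changes that can no longer take effect on a dying task:
++ * newly requested death events, or clearing DEATH/REAP bits once
++ * those reports may have begun (see the kerneldoc above).
++ */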
++ if (target->exit_state &&
++ (((events & ~old_flags) & _UTRACE_DEATH_EVENTS) ||
++ (utrace->death &&
++ ((old_flags & ~events) & _UTRACE_DEATH_EVENTS)) ||
++ (utrace->reap && ((old_flags & ~events) & UTRACE_EVENT(REAP))))) {
++ spin_unlock(&utrace->lock);
++ return -EALREADY;
++ }
++
++ /*
++ * When setting these flags, it's essential that we really
++ * synchronize with exit_notify(). They cannot be set after
++ * exit_notify() takes the tasklist_lock. By holding the read
++ * lock here while setting the flags, we ensure that the calls
++ * to tracehook_notify_death() and tracehook_report_death() will
++ * see the new flags. This ensures that utrace_release_task()
++ * knows positively that utrace_report_death() will be called or
++ * that it won't.
++ */
++ if ((events & ~old_utrace_flags) & _UTRACE_DEATH_EVENTS) {
++ read_lock(&tasklist_lock);
++ if (unlikely(target->exit_state)) {
++ read_unlock(&tasklist_lock);
++ spin_unlock(&utrace->lock);
++ return -EALREADY;
++ }
++ target->utrace_flags |= events;
++ read_unlock(&tasklist_lock);
++ }
++
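++ /*
++ * Install the new mask. @target->utrace_flags only gains bits here;
++ * bits that no engine wants any more are cleaned up later by
++ * utrace_reset().
++ */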
++ engine->flags = events | (engine->flags & ENGINE_STOP);
++ target->utrace_flags |= events;
++
++ if ((events & UTRACE_EVENT_SYSCALL) &&
++ !(old_utrace_flags & UTRACE_EVENT_SYSCALL))
++ set_tsk_thread_flag(target, TIF_SYSCALL_TRACE);
++
++ ret = 0;
++ if ((old_flags & ~events) && target != current &&
++ !task_is_stopped_or_traced(target) && !target->exit_state) {
++ /*
++ * This barrier ensures that our engine->flags changes
++ * have hit before we examine utrace->reporting,
++ * pairing with the barrier in start_callback(). If
++ * @target has not yet hit finish_callback() to clear
++ * utrace->reporting, we might be in the middle of a
++ * callback to @engine.
++ */
++ smp_mb();
++ if (utrace->reporting == engine)
++ ret = -EINPROGRESS;
++ }
++
++ spin_unlock(&utrace->lock);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(utrace_set_events);
++
++/*
++ * Asynchronously mark an engine as being detached.
++ *
++ * This must work while the target thread races with us doing
++ * start_callback(), defined below. It uses smp_rmb() between checking
++ * @engine->flags and using @engine->ops. Here we change @engine->ops
++ * first, then use smp_wmb() before changing @engine->flags. This ensures
++ * it can check the old flags before using the old ops, or check the old
++ * flags before using the new ops, or check the new flags before using the
++ * new ops, but can never check the new flags before using the old ops.
++ * Hence, utrace_detached_ops might be used with any old flags in place.
++ * It has report_quiesce() and report_reap() callbacks to handle all cases.
++ */
++static void mark_engine_detached(struct utrace_engine *engine)
++{
++ engine->ops = &utrace_detached_ops;
++ smp_wmb();
++ engine->flags = UTRACE_EVENT(QUIESCE);
++}
++
++/*
++ * Get @target to stop and return true if it is already stopped now.
++ * If we return false, it will make some event callback soonish.
++ * Called with @utrace locked.
++ */
++static bool utrace_do_stop(struct task_struct *target, struct utrace *utrace)
++{
++ if (task_is_stopped(target)) {
++ /*
++ * Stopped is considered quiescent; when it wakes up, it will
++ * go through utrace_finish_stop() before doing anything else.
++ */
++ spin_lock_irq(&target->sighand->siglock);
++ if (likely(task_is_stopped(target)))
++ __set_task_state(target, TASK_TRACED);
++ spin_unlock_irq(&target->sighand->siglock);
++ } else if (utrace->resume > UTRACE_REPORT) {
++ utrace->resume = UTRACE_REPORT;
++ set_notify_resume(target);
++ }
++
++ return task_is_traced(target);
++}
++
++/*
++ * If the target is not dead it should not be in tracing
++ * stop any more. Wake it unless it's in job control stop.
++ */
++static void utrace_wakeup(struct task_struct *target, struct utrace *utrace)
++{
++ lockdep_assert_held(&utrace->lock);
++ spin_lock_irq(&target->sighand->siglock);
++ if (target->signal->flags & SIGNAL_STOP_STOPPED ||
++ target->signal->group_stop_count)
++ target->state = TASK_STOPPED;
++ else
++ wake_up_state(target, __TASK_TRACED);
++ spin_unlock_irq(&target->sighand->siglock);
++}
++
++/*
++ * This is called when there might be some detached engines on the list or
++ * some stale bits in @task->utrace_flags. Clean them up and recompute the
++ * flags. Returns true if we're now fully detached.
++ *
++ * Called with @utrace->lock held, returns with it released.
++ * After this returns, @utrace might be freed if everything detached.
++ */
++static bool utrace_reset(struct task_struct *task, struct utrace *utrace)
++ __releases(utrace->lock)
++{
++ struct utrace_engine *engine, *next;
++ unsigned long flags = 0;
++ LIST_HEAD(detached);
++
++ splice_attaching(utrace);
++
++ /*
++ * Update the set of events of interest from the union
++ * of the interests of the remaining tracing engines.
++ * For any engine marked detached, remove it from the list.
++ * We'll collect them on the detached list.
++ */
++ list_for_each_entry_safe(engine, next, &utrace->attached, entry) {
++ if (engine->ops == &utrace_detached_ops) {
++ engine->ops = NULL;
++ list_move(&engine->entry, &detached);
++ } else {
++ flags |= engine->flags | UTRACE_EVENT(REAP);
++ }
++ }
++
++ if (task->exit_state) {
++ /*
++ * Once it's already dead, we never install any flags
++ * except REAP. When ->exit_state is set and events
++ * like DEATH are not set, then they never can be set.
++ * This ensures that utrace_release_task() knows
++ * positively that utrace_report_death() can never run.
++ */
++ BUG_ON(utrace->death);
++ flags &= UTRACE_EVENT(REAP);
++ } else if (!(flags & UTRACE_EVENT_SYSCALL) &&
++ test_tsk_thread_flag(task, TIF_SYSCALL_TRACE)) {
++ clear_tsk_thread_flag(task, TIF_SYSCALL_TRACE);
++ }
++
++ if (!flags) {
++ /*
++ * No engines remain; reset the per-task utrace bookkeeping.
++ */
++ utrace->resume = UTRACE_RESUME;
++ utrace->signal_handler = 0;
++ }
++
++ /*
++ * If no more engines want it stopped, wake it up.
++ */
++ if (task_is_traced(task) && !(flags & ENGINE_STOP))
++ utrace_wakeup(task, utrace);
++
++ /*
++ * In theory spin_lock() doesn't imply rcu_read_lock().
++ * Once we clear ->utrace_flags this task_struct can go away
++ * because tracehook_prepare_release_task() path does not take
++ * utrace->lock when ->utrace_flags == 0.
++ */
++ rcu_read_lock();
++ task->utrace_flags = flags;
++ spin_unlock(&utrace->lock);
++ rcu_read_unlock();
++
++ put_detached_list(&detached);
++
++ return !flags;
++}
++
++void utrace_finish_stop(void)
++{
++ /*
++ * If we were task_is_traced() and then SIGKILL'ed, make
++ * sure we do nothing until the tracer drops utrace->lock.
++ */
++ if (unlikely(__fatal_signal_pending(current))) {
++ struct utrace *utrace = task_utrace_struct(current);
++ spin_unlock_wait(&utrace->lock);
++ }
++}
++
++/*
++ * Perform %UTRACE_STOP, i.e. block in TASK_TRACED until woken up.
++ * @task == current, @utrace == current->utrace, which is not locked.
++ * A pending SIGKILL can prevent the stop, or wake us up early, even
++ * though some utrace engine may still want us to stay stopped.
++ */
++static void utrace_stop(struct task_struct *task, struct utrace *utrace,
++ enum utrace_resume_action action)
++{
++relock:
++ spin_lock(&utrace->lock);
++
++ if (action < utrace->resume) {
++ /*
++ * Ensure a reporting pass when we're resumed.
++ */
++ utrace->resume = action;
++ if (action == UTRACE_INTERRUPT)
++ set_thread_flag(TIF_SIGPENDING);
++ else
++ set_thread_flag(TIF_NOTIFY_RESUME);
++ }
++
++ /*
++ * If the ENGINE_STOP bit is clear in utrace_flags, that means
++ * utrace_reset() ran after we processed some UTRACE_STOP return
++ * values from callbacks to get here. If all engines have detached
++ * or resumed us, we don't stop. This check doesn't require
++ * siglock, but it should follow the interrupt/report bookkeeping
++ * steps (this can matter for UTRACE_RESUME but not UTRACE_DETACH).
++ */
++ if (unlikely(!(task->utrace_flags & ENGINE_STOP))) {
++ utrace_reset(task, utrace);
++ if (task->utrace_flags & ENGINE_STOP)
++ goto relock;
++ return;
++ }
++
++ /*
++ * The siglock protects us against signals. As well as SIGKILL
++ * waking us up, we must synchronize with the signal bookkeeping
++ * for stop signals and SIGCONT.
++ */
++ spin_lock_irq(&task->sighand->siglock);
++
++ if (unlikely(__fatal_signal_pending(task))) {
++ spin_unlock_irq(&task->sighand->siglock);
++ spin_unlock(&utrace->lock);
++ return;
++ }
++
++ __set_current_state(TASK_TRACED);
++
++ /*
++ * If there is a group stop in progress,
++ * we must participate in the bookkeeping.
++ */
++ if (unlikely(task->signal->group_stop_count) &&
++ !--task->signal->group_stop_count)
++ task->signal->flags = SIGNAL_STOP_STOPPED;
++
++ spin_unlock_irq(&task->sighand->siglock);
++ spin_unlock(&utrace->lock);
++
++ schedule();
++
++ utrace_finish_stop();
++
++ /*
++ * While in TASK_TRACED, we were considered "frozen enough".
++ * Now that we woke up, it's crucial if we're supposed to be
++ * frozen that we freeze now before running anything substantial.
++ */
++ try_to_freeze();
++
++ /*
++ * While we were in TASK_TRACED, complete_signal() considered
++ * us "uninterested" in signal wakeups. Now make sure our
++ * TIF_SIGPENDING state is correct for normal running.
++ */
++ spin_lock_irq(&task->sighand->siglock);
++ recalc_sigpending();
++ spin_unlock_irq(&task->sighand->siglock);
++}
++
++/*
++ * Called by release_task() with @reap set to true.
++ * Called by utrace_report_death() with @reap set to false.
++ * On reap, make report_reap callbacks and clean out @utrace
++ * unless still making callbacks. On death, update bookkeeping
++ * and handle the reap work if release_task() came in first.
++ */
++void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace,
++ bool reap)
++{
++ struct utrace_engine *engine, *next;
++ struct list_head attached;
++
++ spin_lock(&utrace->lock);
++
++ if (reap) {
++ /*
++ * If the target will do some final callbacks but hasn't
++ * finished them yet, we know because it clears these event
++ * bits after it's done. Instead of cleaning up here and
++ * requiring utrace_report_death() to cope with it, we
++ * delay the REAP report and the teardown until after the
++ * target finishes its death reports.
++ */
++ utrace->reap = 1;
++
++ if (target->utrace_flags & _UTRACE_DEATH_EVENTS) {
++ spin_unlock(&utrace->lock);
++ return;
++ }
++ } else {
++ /*
++ * After we unlock with this flag clear, any competing
++ * utrace_control/utrace_set_events calls know that we've
++ * finished our callbacks and any detach bookkeeping.
++ */
++ utrace->death = 0;
++
++ if (!utrace->reap) {
++ /*
++ * We're just dead, not reaped yet. This will
++ * reset @target->utrace_flags so the later call
++ * with @reap set won't hit the check above.
++ */
++ utrace_reset(target, utrace);
++ return;
++ }
++ }
++
++ /*
++ * utrace_add_engine() checks ->utrace_flags != 0. Since
++ * @utrace->reap is set, nobody can set or clear UTRACE_EVENT(REAP)
++ * in @engine->flags or change @engine->ops and nobody can change
++ * @utrace->attached after we drop the lock.
++ */
++ target->utrace_flags = 0;
++
++ /*
++ * We clear out @utrace->attached before we drop the lock so
++ * that find_matching_engine() can't come across any old engine
++ * while we are busy tearing it down.
++ */
++ list_replace_init(&utrace->attached, &attached);
++ list_splice_tail_init(&utrace->attaching, &attached);
++
++ spin_unlock(&utrace->lock);
++
++ list_for_each_entry_safe(engine, next, &attached, entry) {
++ if (engine->flags & UTRACE_EVENT(REAP))
++ engine->ops->report_reap(engine, target);
++
++ engine->ops = NULL;
++ engine->flags = 0;
++ list_del_init(&engine->entry);
++
++ utrace_engine_put(engine);
++ }
++}
++
++/*
++ * You can't do anything to a dead task but detach it.
++ * If release_task() has been called, you can't do that.
++ *
++ * On the exit path, DEATH and QUIESCE event bits are set only
++ * before utrace_report_death() has taken the lock. At that point,
++ * the death report will come soon, so disallow detach until it's
++ * done. This prevents us from racing with it detaching itself.
++ *
++ * Called only when @target->exit_state is nonzero.
++ */
++static inline int utrace_control_dead(struct task_struct *target,
++ struct utrace *utrace,
++ enum utrace_resume_action action)
++{
++ lockdep_assert_held(&utrace->lock);
++
++ if (action != UTRACE_DETACH || unlikely(utrace->reap))
++ return -ESRCH;
++
++ if (unlikely(utrace->death))
++ /*
++ * We have already started the death report. We can't
++ * prevent the report_death and report_reap callbacks,
++ * so tell the caller they will happen.
++ */
++ return -EALREADY;
++
++ return 0;
++}
++
++/**
++ * utrace_control - control a thread being traced by a tracing engine
++ * @target: thread to affect
++ * @engine: attached engine to affect
++ * @action: &enum utrace_resume_action for thread to do
++ *
++ * This is how a tracing engine asks a traced thread to do something.
++ * This call is controlled by the @action argument, which has the
++ * same meaning as the &enum utrace_resume_action value returned by
++ * event reporting callbacks.
++ *
++ * If @target is already dead (@target->exit_state nonzero),
++ * all actions except %UTRACE_DETACH fail with -%ESRCH.
++ *
++ * The following sections describe each option for the @action argument.
++ *
++ * UTRACE_DETACH:
++ *
++ * After this, the @engine data structure is no longer accessible,
++ * and the thread might be reaped. The thread will start running
++ * again if it was stopped and no longer has any attached engines
++ * that want it stopped.
++ *
++ * If the @report_reap callback may already have begun, this fails
++ * with -%ESRCH. If the @report_death callback may already have
++ * begun, this fails with -%EALREADY.
++ *
++ * If @target is not already stopped, then a callback to this engine
++ * might be in progress or about to start on another CPU. If so,
++ * then this returns -%EINPROGRESS; the detach happens as soon as
++ * the pending callback is finished. To synchronize after an
++ * -%EINPROGRESS return, see utrace_barrier().
++ *
++ * If @target is properly stopped before utrace_control() is called,
++ * then after successful return it's guaranteed that no more callbacks
++ * to the @engine->ops vector will be made.
++ *
++ * The only exception is %SIGKILL (and exec or group-exit by another
++ * thread in the group), which can cause asynchronous @report_death
++ * and/or @report_reap callbacks even when %UTRACE_STOP was used.
++ * (In that event, this fails with -%ESRCH or -%EALREADY, see above.)
++ *
++ * UTRACE_STOP:
++ *
++ * This asks that @target stop running. This returns 0 only if
++ * @target is already stopped, either for tracing or for job
++ * control. Then @target will remain stopped until another
++ * utrace_control() call is made on @engine; @target can be woken
++ * only by %SIGKILL (or equivalent, such as exec or termination by
++ * another thread in the same thread group).
++ *
++ * This returns -%EINPROGRESS if @target is not already stopped.
++ * Then the effect is like %UTRACE_REPORT. A @report_quiesce or
++ * @report_signal callback will be made soon. Your callback can
++ * then return %UTRACE_STOP to keep @target stopped.
++ *
++ * This does not interrupt system calls in progress, including ones
++ * that sleep for a long time. For that, use %UTRACE_INTERRUPT.
++ * To interrupt system calls and then keep @target stopped, your
++ * @report_signal callback can return %UTRACE_STOP.
++ *
++ * UTRACE_RESUME:
++ *
++ * Just let @target continue running normally, reversing the effect
++ * of a previous %UTRACE_STOP. If another engine is keeping @target
++ * stopped, then it remains stopped until all engines let it resume.
++ * If @target was not stopped, this has no effect.
++ *
++ * UTRACE_REPORT:
++ *
++ * This is like %UTRACE_RESUME, but also ensures that there will be
++ * a @report_quiesce or @report_signal callback made soon. If
++ * @target had been stopped, then there will be a callback before it
++ * resumes running normally. If another engine is keeping @target
++ * stopped, then there might be no callbacks until all engines let
++ * it resume.
++ *
++ * Since this is meaningless unless @report_quiesce callbacks will
++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE).
++ *
++ * UTRACE_INTERRUPT:
++ *
++ * This is like %UTRACE_REPORT, but ensures that @target will make a
++ * @report_signal callback before it resumes or delivers signals.
++ * If @target was in a system call or about to enter one, work in
++ * progress will be interrupted as if by %SIGSTOP. If another
++ * engine is keeping @target stopped, then there might be no
++ * callbacks until all engines let it resume.
++ *
++ * This gives @engine an opportunity to introduce a forced signal
++ * disposition via its @report_signal callback.
++ *
++ * UTRACE_SINGLESTEP:
++ *
++ * It's invalid to use this unless arch_has_single_step() returned true.
++ * This is like %UTRACE_RESUME, but resumes for one user instruction only.
++ *
++ * Note that passing %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP to
++ * utrace_control() or returning it from an event callback alone does
++ * not necessarily ensure that stepping will be enabled. If there are
++ * more callbacks made to any engine before returning to user mode,
++ * then the resume action is chosen only by the last set of callbacks.
++ * To be sure, enable %UTRACE_EVENT(%QUIESCE) and look for the
++ * @report_quiesce callback with a zero event mask, or the
++ * @report_signal callback with %UTRACE_SIGNAL_REPORT.
++ *
++ * Since this is not robust unless @report_quiesce callbacks will
++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE).
++ *
++ * UTRACE_BLOCKSTEP:
++ *
++ * It's invalid to use this unless arch_has_block_step() returned true.
++ * This is like %UTRACE_SINGLESTEP, but resumes for one whole basic
++ * block of user instructions.
++ *
++ * Since this is not robust unless @report_quiesce callbacks will
++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE).
++ *
++ * %UTRACE_BLOCKSTEP devolves to %UTRACE_SINGLESTEP when another
++ * tracing engine is using %UTRACE_SINGLESTEP at the same time.
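++ *
++ * A minimal sketch of a synchronous detach (hypothetical @my_engine),
++ * following the -%EINPROGRESS rule described above:
++ *
++ *	ret = utrace_control(task, my_engine, UTRACE_DETACH);
++ *	if (ret == -EINPROGRESS)
++ *		ret = utrace_barrier(task, my_engine);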
++ */
++int utrace_control(struct task_struct *target,
++ struct utrace_engine *engine,
++ enum utrace_resume_action action)
++{
++ struct utrace *utrace;
++ bool reset;
++ int ret;
++
++ if (unlikely(action >= UTRACE_RESUME_MAX)) {
++ WARN(1, "invalid action argument to utrace_control()!");
++ return -EINVAL;
++ }
++
++ /*
++ * This is a sanity check for a programming error in the caller.
++ * Their request can only work properly in all cases by relying on
++ * a follow-up callback, but they didn't set one up! This check
++ * doesn't do locking, but it shouldn't matter. The caller has to
++ * be synchronously sure the callback is set up to be operating the
++ * interface properly.
++ */
++ if (action >= UTRACE_REPORT && action < UTRACE_RESUME &&
++ unlikely(!(engine->flags & UTRACE_EVENT(QUIESCE)))) {
++ WARN(1, "utrace_control() with no QUIESCE callback in place!");
++ return -EINVAL;
++ }
++
++ utrace = get_utrace_lock(target, engine, true);
++ if (unlikely(IS_ERR(utrace)))
++ return PTR_ERR(utrace);
++
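++ /*
++ * @reset says whether to finish with utrace_reset(), which
++ * recomputes ->utrace_flags and may let @target resume. It starts
++ * out true only if @target is already in tracing stop.
++ */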
++ reset = task_is_traced(target);
++ ret = 0;
++
++ /*
++ * ->exit_state can change under us, this doesn't matter.
++ * We do not care about ->exit_state in fact, but we do
++ * care about ->reap and ->death. If either flag is set,
++ * we must also see ->exit_state != 0.
++ */
++ if (unlikely(target->exit_state)) {
++ ret = utrace_control_dead(target, utrace, action);
++ if (ret) {
++ spin_unlock(&utrace->lock);
++ return ret;
++ }
++ reset = true;
++ }
++
++ switch (action) {
++ case UTRACE_STOP:
++ mark_engine_wants_stop(target, engine);
++ if (!reset && !utrace_do_stop(target, utrace))
++ ret = -EINPROGRESS;
++ reset = false;
++ break;
++
++ case UTRACE_DETACH:
++ if (engine_wants_stop(engine))
++ target->utrace_flags &= ~ENGINE_STOP;
++ mark_engine_detached(engine);
++ reset = reset || utrace_do_stop(target, utrace);
++ if (!reset) {
++ /*
++ * As in utrace_set_events(), this barrier ensures
++ * that our engine->flags changes have hit before we
++ * examine utrace->reporting, pairing with the barrier
++ * in start_callback(). If @target has not yet hit
++ * finish_callback() to clear utrace->reporting, we
++ * might be in the middle of a callback to @engine.
++ */
++ smp_mb();
++ if (utrace->reporting == engine)
++ ret = -EINPROGRESS;
++ }
++ break;
++
++ case UTRACE_RESUME:
++ /*
++ * This and all other cases imply resuming if stopped.
++ * There might not be another report before it just
++ * resumes, so make sure single-step is not left set.
++ */
++ clear_engine_wants_stop(engine);
++ if (likely(reset))
++ user_disable_single_step(target);
++ break;
++
++ case UTRACE_BLOCKSTEP:
++ /*
++ * Resume from stopped, step one block.
++ * We fall through to treat it like UTRACE_SINGLESTEP.
++ */
++ if (unlikely(!arch_has_block_step())) {
++ WARN(1, "UTRACE_BLOCKSTEP when !arch_has_block_step()");
++ action = UTRACE_SINGLESTEP;
++ }
++
++ case UTRACE_SINGLESTEP:
++ /*
++ * Resume from stopped, step one instruction.
++ * We fall through to the UTRACE_REPORT case.
++ */
++ if (unlikely(!arch_has_single_step())) {
++ WARN(1,
++ "UTRACE_SINGLESTEP when !arch_has_single_step()");
++ reset = false;
++ ret = -EOPNOTSUPP;
++ break;
++ }
++
++ case UTRACE_REPORT:
++ /*
++ * Make the thread call tracehook_notify_resume() soon.
++ * But don't bother if it's already been interrupted.
++ * In that case, utrace_get_signal() will be reporting soon.
++ */
++ clear_engine_wants_stop(engine);
++ if (action < utrace->resume) {
++ utrace->resume = action;
++ set_notify_resume(target);
++ }
++ break;
++
++ case UTRACE_INTERRUPT:
++ /*
++ * Make the thread call tracehook_get_signal() soon.
++ */
++ clear_engine_wants_stop(engine);
++ if (utrace->resume == UTRACE_INTERRUPT)
++ break;
++ utrace->resume = UTRACE_INTERRUPT;
++
++ /*
++ * If it's not already stopped, interrupt it now. We need
++ * the siglock here in case it calls recalc_sigpending()
++ * and clears its own TIF_SIGPENDING. By taking the lock,
++ * we've serialized any later recalc_sigpending() after our
++ * setting of utrace->resume to force it on.
++ */
++ if (reset) {
++ /*
++ * This is really just to keep the invariant that
++ * TIF_SIGPENDING is set with UTRACE_INTERRUPT.
++ * When it's stopped, we know it's always going
++ * through utrace_get_signal() and will recalculate.
++ */
++ set_tsk_thread_flag(target, TIF_SIGPENDING);
++ } else {
++ struct sighand_struct *sighand;
++ unsigned long irqflags;
++ sighand = lock_task_sighand(target, &irqflags);
++ if (likely(sighand)) {
++ signal_wake_up(target, 0);
++ unlock_task_sighand(target, &irqflags);
++ }
++ }
++ break;
++
++ default:
++ BUG(); /* We checked it on entry. */
++ }
++
++ /*
++ * Let the thread resume running. If it's not stopped now,
++ * there is nothing more we need to do.
++ */
++ if (reset)
++ utrace_reset(target, utrace);
++ else
++ spin_unlock(&utrace->lock);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(utrace_control);
++
++/**
++ * utrace_barrier - synchronize with simultaneous tracing callbacks
++ * @target: thread to affect
++ * @engine: engine to affect (can be detached)
++ *
++ * This blocks while @target might be in the midst of making a callback to
++ * @engine. It can be interrupted by signals and will return -%ERESTARTSYS.
++ * A return value of zero means no callback from @target to @engine was
++ * in progress. Any effect of its return value (such as %UTRACE_STOP) has
++ * already been applied to @engine.
++ *
++ * It's not necessary to keep the @target pointer alive for this call.
++ * It's only necessary to hold a ref on @engine. This will return
++ * safely even if @target has been reaped and has no task refs.
++ *
++ * A successful return from utrace_barrier() guarantees its ordering
++ * with respect to utrace_set_events() and utrace_control() calls. If
++ * @target was not properly stopped, event callbacks just disabled might
++ * still be in progress; utrace_barrier() waits until there is no chance
++ * an unwanted callback can be in progress.
++ */
++int utrace_barrier(struct task_struct *target, struct utrace_engine *engine)
++{
++ struct utrace *utrace;
++ int ret = -ERESTARTSYS;
++
++ if (unlikely(target == current))
++ return 0;
++
++ do {
++ utrace = get_utrace_lock(target, engine, false);
++ if (unlikely(IS_ERR(utrace))) {
++ ret = PTR_ERR(utrace);
++ if (ret != -ERESTARTSYS)
++ break;
++ } else {
++ /*
++ * All engine state changes are done while
++ * holding the lock, i.e. before we get here.
++ * Since we have the lock, we only need to
++ * worry about @target making a callback.
++ * When it has entered start_callback() but
++ * not yet gotten to finish_callback(), we
++ * will see utrace->reporting == @engine.
++ * When @target doesn't take the lock, it uses
++ * barriers to order setting utrace->reporting
++ * before it examines the engine state.
++ */
++ if (utrace->reporting != engine)
++ ret = 0;
++ spin_unlock(&utrace->lock);
++ if (!ret)
++ break;
++ }
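++ /*
++ * A callback to @engine is still in flight; sleep a tick and
++ * retry until it finishes or a signal interrupts us.
++ */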
++ schedule_timeout_interruptible(1);
++ } while (!signal_pending(current));
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(utrace_barrier);
++
++/*
++ * This is local state used for reporting loops, perhaps optimized away.
++ */
++struct utrace_report {
++ u32 result; /* callback result bits, minus the resume action */
++ enum utrace_resume_action action; /* strongest resume action returned so far */
++ enum utrace_resume_action resume_action; /* like action, but never UTRACE_STOP */
++ bool detaches; /* some engine detached; utrace_reset() is needed */
++ bool spurious; /* no engine was interested in this event */
++};
++
++#define INIT_REPORT(var) \
++ struct utrace_report var = { \
++ .action = UTRACE_RESUME, \
++ .resume_action = UTRACE_RESUME, \
++ .spurious = true \
++ }
++
++/*
++ * We are now making the report, so clear the flag saying we need one.
++ * When there is a new attach, ->pending_attach is set just so we will
++ * know to do splice_attaching() here before the callback loop.
++ */
++static enum utrace_resume_action start_report(struct utrace *utrace)
++{
++ enum utrace_resume_action resume = utrace->resume;
++ if (utrace->pending_attach ||
++ (resume > UTRACE_INTERRUPT && resume < UTRACE_RESUME)) {
++ spin_lock(&utrace->lock);
++ splice_attaching(utrace);
++ resume = utrace->resume;
++ if (resume > UTRACE_INTERRUPT)
++ utrace->resume = UTRACE_RESUME;
++ spin_unlock(&utrace->lock);
++ }
++ return resume;
++}
++
++static inline void finish_report_reset(struct task_struct *task,
++ struct utrace *utrace,
++ struct utrace_report *report)
++{
++ if (unlikely(report->spurious || report->detaches)) {
++ spin_lock(&utrace->lock);
++ if (utrace_reset(task, utrace))
++ report->action = UTRACE_RESUME;
++ }
++}
++
++/*
++ * Complete a normal reporting pass, pairing with a start_report() call.
++ * This handles any UTRACE_DETACH or UTRACE_REPORT or UTRACE_INTERRUPT
++ * returns from engine callbacks. If @will_not_stop is true and any
++ * engine's last callback used UTRACE_STOP, we do UTRACE_REPORT here to
++ * ensure we stop before user mode. If there were no callbacks made, it
++ * will recompute @task->utrace_flags to avoid another false-positive.
++ */
++static void finish_report(struct task_struct *task, struct utrace *utrace,
++ struct utrace_report *report, bool will_not_stop)
++{
++ enum utrace_resume_action resume = report->action;
++
++ if (resume == UTRACE_STOP)
++ resume = will_not_stop ? UTRACE_REPORT : UTRACE_RESUME;
++
++ if (resume < utrace->resume) {
++ spin_lock(&utrace->lock);
++ utrace->resume = resume;
++ if (resume == UTRACE_INTERRUPT)
++ set_tsk_thread_flag(task, TIF_SIGPENDING);
++ else
++ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME);
++ spin_unlock(&utrace->lock);
++ }
++
++ finish_report_reset(task, utrace, report);
++}
++
++static void finish_callback_report(struct task_struct *task,
++ struct utrace *utrace,
++ struct utrace_report *report,
++ struct utrace_engine *engine,
++ enum utrace_resume_action action)
++{
++ if (action == UTRACE_DETACH) {
++ /*
++ * By holding the lock here, we make sure that
++ * utrace_barrier() (really get_utrace_lock()) sees the
++ * effect of this detach. Otherwise utrace_barrier() could
++ * return 0 after this callback had returned UTRACE_DETACH.
++ * This way, a 0 return is an unambiguous indicator that any
++ * callback returning UTRACE_DETACH has indeed caused detach.
++ */
++ spin_lock(&utrace->lock);
++ engine->ops = &utrace_detached_ops;
++ spin_unlock(&utrace->lock);
++ }
++
++ /*
++ * If utrace_control() was used, treat that like UTRACE_DETACH here.
++ */
++ if (engine->ops == &utrace_detached_ops) {
++ report->detaches = true;
++ return;
++ }
++
++ if (action < report->action)
++ report->action = action;
++
++ if (action != UTRACE_STOP) {
++ if (action < report->resume_action)
++ report->resume_action = action;
++
++ if (engine_wants_stop(engine)) {
++ spin_lock(&utrace->lock);
++ clear_engine_wants_stop(engine);
++ spin_unlock(&utrace->lock);
++ }
++
++ return;
++ }
++
++ if (!engine_wants_stop(engine)) {
++ spin_lock(&utrace->lock);
++ /*
++ * If utrace_control() came in and detached us
++ * before we got the lock, we must not stop now.
++ */
++ if (unlikely(engine->ops == &utrace_detached_ops))
++ report->detaches = true;
++ else
++ mark_engine_wants_stop(task, engine);
++ spin_unlock(&utrace->lock);
++ }
++}
++
++/*
++ * Apply the return value of one engine callback to @report.
++ * Returns true if @engine detached and should not get any more callbacks.
++ */
++static bool finish_callback(struct task_struct *task, struct utrace *utrace,
++ struct utrace_report *report,
++ struct utrace_engine *engine,
++ u32 ret)
++{
++ report->result = ret & ~UTRACE_RESUME_MASK;
++ finish_callback_report(task, utrace, report, engine,
++ utrace_resume_action(ret));
++
++ /*
++ * Now that we have applied the effect of the return value,
++ * clear this so that utrace_barrier() can stop waiting.
++ * A subsequent utrace_control() can stop or resume @engine
++ * and know this was ordered after its callback's action.
++ *
++ * We don't need any barriers here because utrace_barrier()
++ * takes utrace->lock. If we touched engine->flags above,
++ * the lock guaranteed this change was before utrace_barrier()
++ * examined utrace->reporting.
++ */
++ utrace->reporting = NULL;
++
++ /*
++ * We've just done an engine callback. These are allowed to sleep,
++ * though all well-behaved ones restrict that to blocking kmalloc()
++ * or quickly-acquired mutex_lock() and the like. This is a good
++ * place to make sure tracing engines don't introduce too much
++ * latency under voluntary preemption.
++ */
++ might_sleep();
++
++ return engine->ops == &utrace_detached_ops;
++}
++
++/*
++ * Start the callbacks for @engine to consider @event (a bit mask).
++ * This makes the report_quiesce() callback first. If @engine wants
++ * a specific callback for @event, we return the ops vector to use.
++ * If not, we return NULL. The return value from the ops->callback
++ * function called should be passed to finish_callback().
++ */
++static const struct utrace_engine_ops *start_callback(
++ struct utrace *utrace, struct utrace_report *report,
++ struct utrace_engine *engine, struct task_struct *task,
++ unsigned long event)
++{
++ const struct utrace_engine_ops *ops;
++ unsigned long want;
++
++ /*
++ * This barrier ensures that we've set utrace->reporting before
++ * we examine engine->flags or engine->ops. utrace_barrier()
++ * relies on this ordering to indicate that the effect of any
++ * utrace_control() and utrace_set_events() calls is in place
++ * by the time utrace->reporting can be seen to be NULL.
++ */
++ utrace->reporting = engine;
++ smp_mb();
++
++ /*
++ * This pairs with the barrier in mark_engine_detached().
++ * It makes sure that we never see the old ops vector with
++ * the new flags, in case the original vector had no report_quiesce.
++ */
++ want = engine->flags;
++ smp_rmb();
++ ops = engine->ops;
++
++ if (want & UTRACE_EVENT(QUIESCE)) {
++ if (finish_callback(task, utrace, report, engine,
++ (*ops->report_quiesce)(report->action,
++ engine, event)))
++ return NULL;
++
++ /*
++ * finish_callback() reset utrace->reporting after the
++ * quiesce callback. Now we set it again (as above)
++ * before re-examining engine->flags, which could have
++ * been changed synchronously by ->report_quiesce or
++ * asynchronously by utrace_control() or utrace_set_events().
++ */
++ utrace->reporting = engine;
++ smp_mb();
++ want = engine->flags;
++ }
++
++ if (want & ENGINE_STOP)
++ report->action = UTRACE_STOP;
++
++ if (want & event) {
++ report->spurious = false;
++ return ops;
++ }
++
++ utrace->reporting = NULL;
++ return NULL;
++}
++
++/*
++ * Do a normal reporting pass for engines interested in @event.
++ * @callback is the name of the member in the ops vector, and remaining
++ * args are the extras it takes after the standard three args.
++ */
++#define REPORT_CALLBACKS(rev, task, utrace, report, event, callback, ...) \
++ do { \
++ struct utrace_engine *engine; \
++ const struct utrace_engine_ops *ops; \
++ list_for_each_entry##rev(engine, &utrace->attached, entry) { \
++ ops = start_callback(utrace, report, engine, task, \
++ event); \
++ if (!ops) \
++ continue; \
++ finish_callback(task, utrace, report, engine, \
++ (*ops->callback)(__VA_ARGS__)); \
++ } \
++ } while (0)
++#define REPORT(task, utrace, report, event, callback, ...) \
++ do { \
++ start_report(utrace); \
++ REPORT_CALLBACKS(, task, utrace, report, event, callback, \
++ (report)->action, engine, ## __VA_ARGS__); \
++ finish_report(task, utrace, report, true); \
++ } while (0)
++
++/*
++ * Called iff UTRACE_EVENT(EXEC) flag is set.
++ */
++void utrace_report_exec(struct linux_binfmt *fmt, struct linux_binprm *bprm,
++ struct pt_regs *regs)
++{
++ struct task_struct *task = current;
++ struct utrace *utrace = task_utrace_struct(task);
++ INIT_REPORT(report);
++
++ REPORT(task, utrace, &report, UTRACE_EVENT(EXEC),
++ report_exec, fmt, bprm, regs);
++}
++
++static u32 do_report_syscall_entry(struct pt_regs *regs,
++ struct task_struct *task,
++ struct utrace *utrace,
++ struct utrace_report *report,
++ u32 resume_report)
++{
++ start_report(utrace);
++ REPORT_CALLBACKS(_reverse, task, utrace, report,
++ UTRACE_EVENT(SYSCALL_ENTRY), report_syscall_entry,
++ resume_report | report->result | report->action,
++ engine, regs);
++ finish_report(task, utrace, report, false);
++
++ if (report->action != UTRACE_STOP)
++ return 0;
++
++ utrace_stop(task, utrace, report->resume_action);
++
++ if (fatal_signal_pending(task)) {
++ /*
++ * We are continuing despite UTRACE_STOP because of a
++ * SIGKILL. Don't let the system call actually proceed.
++ */
++ report->result = UTRACE_SYSCALL_ABORT;
++ } else if (utrace->resume <= UTRACE_REPORT) {
++ /*
++ * If we've been asked for another report after our stop,
++ * go back to report (and maybe stop) again before we run
++ * the system call. The second (and later) reports are
++ * marked with the UTRACE_SYSCALL_RESUMED flag so that
++ * engines know this is a second report at the same
++ * entry. This gives them the chance to examine the
++ * registers anew after they might have been changed
++ * while we were stopped.
++ */
++ report->detaches = false;
++ report->spurious = true;
++ report->action = report->resume_action = UTRACE_RESUME;
++ return UTRACE_SYSCALL_RESUMED;
++ }
++
++ return 0;
++}
++
++/*
++ * Called iff UTRACE_EVENT(SYSCALL_ENTRY) flag is set.
++ * Return true to prevent the system call.
++ */
++bool utrace_report_syscall_entry(struct pt_regs *regs)
++{
++ struct task_struct *task = current;
++ struct utrace *utrace = task_utrace_struct(task);
++ INIT_REPORT(report);
++ u32 resume_report = 0;
++
++ do {
++ resume_report = do_report_syscall_entry(regs, task, utrace,
++ &report, resume_report);
++ } while (resume_report);
++
++ return utrace_syscall_action(report.result) == UTRACE_SYSCALL_ABORT;
++}
++
++/*
++ * Called iff UTRACE_EVENT(SYSCALL_EXIT) flag is set.
++ */
++void utrace_report_syscall_exit(struct pt_regs *regs)
++{
++ struct task_struct *task = current;
++ struct utrace *utrace = task_utrace_struct(task);
++ INIT_REPORT(report);
++
++ REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT),
++ report_syscall_exit, regs);
++}
++
++/*
++ * Called iff UTRACE_EVENT(CLONE) flag is set.
++ * This notification call blocks the wake_up_new_task call on the child.
++ * So we must not quiesce here. tracehook_report_clone_complete will do
++ * a quiescence check momentarily.
++ */
++void utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
++{
++ struct task_struct *task = current;
++ struct utrace *utrace = task_utrace_struct(task);
++ INIT_REPORT(report);
++
++ /*
++ * We don't use the REPORT() macro here, because we need
++ * to clear utrace->cloning before finish_report().
++ * After finish_report(), utrace can be a stale pointer
++ * in cases when report.action is still UTRACE_RESUME.
++ */
++ start_report(utrace);
++ utrace->cloning = child;
++
++ REPORT_CALLBACKS(, task, utrace, &report,
++ UTRACE_EVENT(CLONE), report_clone,
++ report.action, engine, clone_flags, child);
++
++ utrace->cloning = NULL;
++ finish_report(task, utrace, &report, !(clone_flags & CLONE_VFORK));
++
++ /*
++ * For a vfork, we will go into an uninterruptible block waiting
++ * for the child. We need UTRACE_STOP to happen before this, not
++ * after. For CLONE_VFORK, utrace_finish_vfork() will be called.
++ */
++ if (report.action == UTRACE_STOP && (clone_flags & CLONE_VFORK)) {
++ spin_lock(&utrace->lock);
++ utrace->vfork_stop = 1;
++ spin_unlock(&utrace->lock);
++ }
++}
++
++/*
++ * We're called after utrace_report_clone() for a CLONE_VFORK.
++ * If UTRACE_STOP was left from the clone report, we stop here.
++ * After this, we'll enter the uninterruptible wait_for_completion()
++ * waiting for the child.
++ */
++void utrace_finish_vfork(struct task_struct *task)
++{
++ struct utrace *utrace = task_utrace_struct(task);
++
++ if (utrace->vfork_stop) {
++ spin_lock(&utrace->lock);
++ utrace->vfork_stop = 0;
++ spin_unlock(&utrace->lock);
++ utrace_stop(task, utrace, UTRACE_RESUME); /* XXX */
++ }
++}
++
++/*
++ * Called iff UTRACE_EVENT(JCTL) flag is set.
++ *
++ * Called with siglock held.
++ */
++void utrace_report_jctl(int notify, int what)
++{
++ struct task_struct *task = current;
++ struct utrace *utrace = task_utrace_struct(task);
++ INIT_REPORT(report);
++
++ spin_unlock_irq(&task->sighand->siglock);
++
++ REPORT(task, utrace, &report, UTRACE_EVENT(JCTL),
++ report_jctl, what, notify);
++
++ spin_lock_irq(&task->sighand->siglock);
++}
++
++/*
++ * Called iff UTRACE_EVENT(EXIT) flag is set.
++ */
++void utrace_report_exit(long *exit_code)
++{
++ struct task_struct *task = current;
++ struct utrace *utrace = task_utrace_struct(task);
++ INIT_REPORT(report);
++ long orig_code = *exit_code;
++
++ REPORT(task, utrace, &report, UTRACE_EVENT(EXIT),
++ report_exit, orig_code, exit_code);
++
++ if (report.action == UTRACE_STOP)
++ utrace_stop(task, utrace, report.resume_action);
++}
++
++/*
++ * Called iff UTRACE_EVENT(DEATH) or UTRACE_EVENT(QUIESCE) flag is set.
++ *
++ * It is always possible that we are racing with utrace_release_task here.
++ * For this reason, utrace_release_task checks for the event bits that get
++ * us here, and delays its cleanup for us to do.
++ */
++void utrace_report_death(struct task_struct *task, struct utrace *utrace,
++ bool group_dead, int signal)
++{
++ INIT_REPORT(report);
++
++ BUG_ON(!task->exit_state);
++
++ /*
++ * We are presently considered "quiescent"--which is accurate
++ * inasmuch as we won't run any more user instructions ever again.
++ * But for utrace_control and utrace_set_events to be robust, they
++ * must be sure whether or not we will run any more callbacks. If
++ * a call comes in before we do, taking the lock here synchronizes
++ * us so we don't run any callbacks that were just disabled. Calls that
++ * come in while we're running the callbacks will see the utrace->death
++ * flag and know that we are not yet fully quiescent for purposes
++ * of detach bookkeeping.
++ */
++ spin_lock(&utrace->lock);
++ BUG_ON(utrace->death);
++ utrace->death = 1;
++ utrace->resume = UTRACE_RESUME;
++ splice_attaching(utrace);
++ spin_unlock(&utrace->lock);
++
++ REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
++ report_death, engine, group_dead, signal);
++
++ utrace_maybe_reap(task, utrace, false);
++}
++
++/*
++ * Finish the last reporting pass before returning to user mode.
++ */
++static void finish_resume_report(struct task_struct *task,
++ struct utrace *utrace,
++ struct utrace_report *report)
++{
++ finish_report_reset(task, utrace, report);
++
++ switch (report->action) {
++ case UTRACE_STOP:
++ utrace_stop(task, utrace, report->resume_action);
++ break;
++
++ case UTRACE_INTERRUPT:
++ if (!signal_pending(task))
++ set_tsk_thread_flag(task, TIF_SIGPENDING);
++ break;
++
++ case UTRACE_BLOCKSTEP:
++ if (likely(arch_has_block_step())) {
++ user_enable_block_step(task);
++ break;
++ }
++
++ /*
++ * This means some callback is to blame for failing
++ * to check arch_has_block_step() itself. Warn and
++ * then fall through to treat it as SINGLESTEP.
++ */
++ WARN(1, "UTRACE_BLOCKSTEP when !arch_has_block_step()");
++
++ case UTRACE_SINGLESTEP:
++ if (likely(arch_has_single_step())) {
++ user_enable_single_step(task);
++ } else {
++ /*
++ * This means some callback is to blame for failing
++ * to check arch_has_single_step() itself. Warn loudly
++ * so the offending module gets fixed.
++ */
++ WARN(1,
++ "UTRACE_SINGLESTEP when !arch_has_single_step()");
++ }
++ break;
++
++ case UTRACE_REPORT:
++ case UTRACE_RESUME:
++ default:
++ user_disable_single_step(task);
++ break;
++ }
++}
++
++/*
++ * This is called when TIF_NOTIFY_RESUME had been set (and is now clear).
++ * We are close to user mode, and this is the place to report or stop.
++ * When we return, we're going to user mode or into the signals code.
++ */
++void utrace_resume(struct task_struct *task, struct pt_regs *regs)
++{
++ struct utrace *utrace = task_utrace_struct(task);
++ INIT_REPORT(report);
++ struct utrace_engine *engine;
++
++ /*
++ * Some machines get here with interrupts disabled. The same arch
++ * code path leads to calling into get_signal_to_deliver(), which
++ * implicitly reenables them by virtue of spin_unlock_irq.
++ */
++ local_irq_enable();
++
++ /*
++ * If this flag is still set it's because there was a signal
++ * handler setup done but no report_signal following it. Clear
++ * the flag before we get to user so it doesn't confuse us later.
++ */
++ if (unlikely(utrace->signal_handler)) {
++ spin_lock(&utrace->lock);
++ utrace->signal_handler = 0;
++ spin_unlock(&utrace->lock);
++ }
++
++ /*
++ * Update our bookkeeping even if there are no callbacks made here.
++ */
++ report.action = start_report(utrace);
++
++ switch (report.action) {
++ case UTRACE_RESUME:
++ /*
++ * Anything we might have done was already handled by
++ * utrace_get_signal(), or this is an entirely spurious
++ * call. (The arch might use TIF_NOTIFY_RESUME for other
++ * purposes as well as calling us.)
++ */
++ return;
++ case UTRACE_REPORT:
++ if (unlikely(!(task->utrace_flags & UTRACE_EVENT(QUIESCE))))
++ break;
++ /*
++ * Do a simple reporting pass, with no specific
++ * callback after report_quiesce.
++ */
++ report.action = UTRACE_RESUME;
++ list_for_each_entry(engine, &utrace->attached, entry)
++ start_callback(utrace, &report, engine, task, 0);
++ break;
++ default:
++ /*
++ * Even if this report was truly spurious, there is no need
++ * for utrace_reset() now. TIF_NOTIFY_RESUME was already
++ * cleared--it doesn't stay spuriously set.
++ */
++ report.spurious = false;
++ break;
++ }
++
++ /*
++ * Finish the report and either stop or get ready to resume.
++ * If utrace->resume was not UTRACE_REPORT, this applies its
++ * effect now (i.e. step or interrupt).
++ */
++ finish_resume_report(task, utrace, &report);
++}
++
++/*
++ * Return true if current has forced signal_pending().
++ *
++ * This is called only when current->utrace_flags is nonzero, so we know
++ * that current->utrace must be set. It's not inlined in tracehook.h
++ * just so that struct utrace can stay opaque outside this file.
++ */
++bool utrace_interrupt_pending(void)
++{
++ return task_utrace_struct(current)->resume == UTRACE_INTERRUPT;
++}
++
++/*
++ * Take the siglock and push @info back on our queue.
++ * Returns with @task->sighand->siglock held.
++ */
++static void push_back_signal(struct task_struct *task, siginfo_t *info)
++ __acquires(task->sighand->siglock)
++{
++ struct sigqueue *q;
++
++ if (unlikely(!info->si_signo)) { /* Oh, a wise guy! */
++ spin_lock_irq(&task->sighand->siglock);
++ return;
++ }
++
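++ /*
++ * If the allocation fails we still mark the signal pending below;
++ * only the detailed siginfo is lost, not the signal itself.
++ */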
++ q = sigqueue_alloc();
++ if (likely(q)) {
++ q->flags = 0;
++ copy_siginfo(&q->info, info);
++ }
++
++ spin_lock_irq(&task->sighand->siglock);
++
++ sigaddset(&task->pending.signal, info->si_signo);
++ if (likely(q))
++ list_add(&q->list, &task->pending.list);
++
++ set_tsk_thread_flag(task, TIF_SIGPENDING);
++}
++
++/*
++ * This is the hook from the signals code, called with the siglock held.
++ * Here is the ideal place to stop. We also dequeue and intercept signals.
++ */
++int utrace_get_signal(struct task_struct *task, struct pt_regs *regs,
++ siginfo_t *info, struct k_sigaction *return_ka)
++ __releases(task->sighand->siglock)
++ __acquires(task->sighand->siglock)
++{
++ struct utrace *utrace;
++ struct k_sigaction *ka;
++ INIT_REPORT(report);
++ struct utrace_engine *engine;
++ const struct utrace_engine_ops *ops;
++ unsigned long event, want;
++ u32 ret;
++ int signr;
++
++ utrace = task_utrace_struct(task);
++ if (utrace->resume < UTRACE_RESUME ||
++ utrace->pending_attach || utrace->signal_handler) {
++ enum utrace_resume_action resume;
++
++ /*
++ * We've been asked for an explicit report before we
++ * even check for pending signals.
++ */
++
++ spin_unlock_irq(&task->sighand->siglock);
++
++ spin_lock(&utrace->lock);
++
++ splice_attaching(utrace);
++
++ report.result = utrace->signal_handler ?
++ UTRACE_SIGNAL_HANDLER : UTRACE_SIGNAL_REPORT;
++ utrace->signal_handler = 0;
++
++ resume = utrace->resume;
++ utrace->resume = UTRACE_RESUME;
++
++ spin_unlock(&utrace->lock);
++
++ /*
++ * Make sure signal_pending() only returns true
++ * if there are real signals pending.
++ */
++ if (signal_pending(task)) {
++ spin_lock_irq(&task->sighand->siglock);
++ recalc_sigpending();
++ spin_unlock_irq(&task->sighand->siglock);
++ }
++
++ if (resume > UTRACE_REPORT) {
++ /*
++ * We only got here to process utrace->resume.
++ * Despite no callbacks, this report is not spurious.
++ */
++ report.action = resume;
++ report.spurious = false;
++ finish_resume_report(task, utrace, &report);
++ return -1;
++ } else if (!(task->utrace_flags & UTRACE_EVENT(QUIESCE))) {
++ /*
++ * We only got here to clear utrace->signal_handler.
++ */
++ return -1;
++ }
++
++ /*
++ * Do a reporting pass for no signal, just for EVENT(QUIESCE).
++ * The engine callbacks can fill in *info and *return_ka.
++ * We'll pass NULL for the @orig_ka argument to indicate
++ * that there was no original signal.
++ */
++ event = 0;
++ ka = NULL;
++ memset(return_ka, 0, sizeof *return_ka);
++ } else if (!(task->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) ||
++ unlikely(task->signal->group_stop_count)) {
++ /*
++ * If no engine is interested in intercepting signals or
++ * we must stop, let the caller just dequeue them normally
++ * or participate in group-stop.
++ */
++ return 0;
++ } else {
++ /*
++ * Steal the next signal so we can let tracing engines
++ * examine it. From the signal number and sigaction,
++ * determine what normal delivery would do. If no
++ * engine perturbs it, we'll do that by returning the
++ * signal number after setting *return_ka.
++ */
++ signr = dequeue_signal(task, &task->blocked, info);
++ if (signr == 0)
++ return signr;
++ BUG_ON(signr != info->si_signo);
++
++ ka = &task->sighand->action[signr - 1];
++ *return_ka = *ka;
++
++ /*
++ * We are never allowed to interfere with SIGKILL.
++ * Just punt after filling in *return_ka for our caller.
++ */
++ if (signr == SIGKILL)
++ return signr;
++
++ if (ka->sa.sa_handler == SIG_IGN) {
++ event = UTRACE_EVENT(SIGNAL_IGN);
++ report.result = UTRACE_SIGNAL_IGN;
++ } else if (ka->sa.sa_handler != SIG_DFL) {
++ event = UTRACE_EVENT(SIGNAL);
++ report.result = UTRACE_SIGNAL_DELIVER;
++ } else if (sig_kernel_coredump(signr)) {
++ event = UTRACE_EVENT(SIGNAL_CORE);
++ report.result = UTRACE_SIGNAL_CORE;
++ } else if (sig_kernel_ignore(signr)) {
++ event = UTRACE_EVENT(SIGNAL_IGN);
++ report.result = UTRACE_SIGNAL_IGN;
++ } else if (signr == SIGSTOP) {
++ event = UTRACE_EVENT(SIGNAL_STOP);
++ report.result = UTRACE_SIGNAL_STOP;
++ } else if (sig_kernel_stop(signr)) {
++ event = UTRACE_EVENT(SIGNAL_STOP);
++ report.result = UTRACE_SIGNAL_TSTP;
++ } else {
++ event = UTRACE_EVENT(SIGNAL_TERM);
++ report.result = UTRACE_SIGNAL_TERM;
++ }
++
++ /*
++ * Now that we know what event type this signal is, we
++ * can short-circuit if no engines care about those.
++ */
++ if ((task->utrace_flags & (event | UTRACE_EVENT(QUIESCE))) == 0)
++ return signr;
++
++ /*
++ * We have some interested engines, so tell them about
++ * the signal and let them change its disposition.
++ */
++ spin_unlock_irq(&task->sighand->siglock);
++ }
++
++ /*
++ * This reporting pass chooses what signal disposition we'll act on.
++ */
++ list_for_each_entry(engine, &utrace->attached, entry) {
++ /*
++ * See start_callback() comment about this barrier.
++ */
++ utrace->reporting = engine;
++ smp_mb();
++
++ /*
++ * This pairs with the barrier in mark_engine_detached(),
++ * see start_callback() comments.
++ */
++ want = engine->flags;
++ smp_rmb();
++ ops = engine->ops;
++
++ if ((want & (event | UTRACE_EVENT(QUIESCE))) == 0) {
++ utrace->reporting = NULL;
++ continue;
++ }
++
++ if (ops->report_signal)
++ ret = (*ops->report_signal)(
++ report.result | report.action, engine,
++ regs, info, ka, return_ka);
++ else
++ ret = (report.result | (*ops->report_quiesce)(
++ report.action, engine, event));
++
++ /*
++ * Avoid a tight loop reporting again and again if some
++ * engine is too stupid.
++ */
++ switch (utrace_resume_action(ret)) {
++ default:
++ break;
++ case UTRACE_INTERRUPT:
++ case UTRACE_REPORT:
++ ret = (ret & ~UTRACE_RESUME_MASK) | UTRACE_RESUME;
++ break;
++ }
++
++ finish_callback(task, utrace, &report, engine, ret);
++ }
++
++ /*
++ * We express the chosen action to the signals code in terms
++ * of a representative signal whose default action does it.
++ * Our caller uses our return value (signr) to decide what to
++ * do, but uses info->si_signo as the signal number to report.
++ */
++ switch (utrace_signal_action(report.result)) {
++ case UTRACE_SIGNAL_TERM:
++ signr = SIGTERM;
++ break;
++
++ case UTRACE_SIGNAL_CORE:
++ signr = SIGQUIT;
++ break;
++
++ case UTRACE_SIGNAL_STOP:
++ signr = SIGSTOP;
++ break;
++
++ case UTRACE_SIGNAL_TSTP:
++ signr = SIGTSTP;
++ break;
++
++ case UTRACE_SIGNAL_DELIVER:
++ signr = info->si_signo;
++
++ if (return_ka->sa.sa_handler == SIG_DFL) {
++ /*
++ * We'll do signr's normal default action.
++ * For ignore, we'll fall through below.
++ * For stop/death, we break out here; the code below
++ * relocks the siglock and returns it.
++ */
++ if (likely(signr) && !sig_kernel_ignore(signr))
++ break;
++ } else if (return_ka->sa.sa_handler != SIG_IGN &&
++ likely(signr)) {
++ /*
++ * Complete the bookkeeping after the report.
++ * The handler will run. If an engine wanted to
++ * stop or step, then make sure we do another
++ * report after signal handler setup.
++ */
++ if (report.action != UTRACE_RESUME)
++ report.action = UTRACE_INTERRUPT;
++ finish_report(task, utrace, &report, true);
++
++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD))
++ push_back_signal(task, info);
++ else
++ spin_lock_irq(&task->sighand->siglock);
++
++ /*
++ * We do the SA_ONESHOT work here since the
++ * normal path will only touch *return_ka now.
++ */
++ if (unlikely(return_ka->sa.sa_flags & SA_ONESHOT)) {
++ return_ka->sa.sa_flags &= ~SA_ONESHOT;
++ if (likely(valid_signal(signr))) {
++ ka = &task->sighand->action[signr - 1];
++ ka->sa.sa_handler = SIG_DFL;
++ }
++ }
++
++ return signr;
++ }
++
++ /* Fall through for an ignored signal. */
++
++ case UTRACE_SIGNAL_IGN:
++ case UTRACE_SIGNAL_REPORT:
++ default:
++ /*
++ * If the signal is being ignored, then we are on the way
++ * directly back to user mode. We can stop here, or step,
++ * as in utrace_resume(), above. After we've dealt with that,
++ * our caller will relock and come back through here.
++ */
++ finish_resume_report(task, utrace, &report);
++
++ if (unlikely(fatal_signal_pending(task))) {
++ /*
++ * The only reason we woke up now was because of a
++ * SIGKILL. Don't do normal dequeuing in case it
++ * might get a signal other than SIGKILL. That would
++ * perturb the death state so it might differ from
++ * what the debugger would have allowed to happen.
++ * Instead, pluck out just the SIGKILL to be sure
++ * we'll die immediately with nothing else different
++ * from the quiescent state the debugger wanted us in.
++ */
++ sigset_t sigkill_only;
++ siginitsetinv(&sigkill_only, sigmask(SIGKILL));
++ spin_lock_irq(&task->sighand->siglock);
++ signr = dequeue_signal(task, &sigkill_only, info);
++ BUG_ON(signr != SIGKILL);
++ *return_ka = task->sighand->action[SIGKILL - 1];
++ return signr;
++ }
++
++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) {
++ push_back_signal(task, info);
++ spin_unlock_irq(&task->sighand->siglock);
++ }
++
++ return -1;
++ }
++
++ /*
++ * Complete the bookkeeping after the report.
++ * This sets utrace->resume if UTRACE_STOP was used.
++ */
++ finish_report(task, utrace, &report, true);
++
++ return_ka->sa.sa_handler = SIG_DFL;
++
++ /*
++ * If this signal is fatal, si_signo gets through as exit_code.
++ * We can't allow a completely bogus value there or else core
++ * kernel code can freak out. (If an engine wants to control
++ * the exit_code value exactly, it can do so in report_exit.)
++ * We'll produce a big complaint in dmesg, but won't crash.
++ * That's nicer for debugging your utrace engine.
++ */
++ if (unlikely(info->si_signo & 0x80)) {
++ WARN(1, "utrace engine left bogus si_signo value!");
++ info->si_signo = SIGTRAP;
++ }
++
++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD))
++ push_back_signal(task, info);
++ else
++ spin_lock_irq(&task->sighand->siglock);
++
++ if (sig_kernel_stop(signr))
++ task->signal->flags |= SIGNAL_STOP_DEQUEUED;
++
++ return signr;
++}
++
++/*
++ * This gets called after a signal handler has been set up.
++ * We set a flag so the next report knows it happened.
++ * If we're already stepping, make sure we do a report_signal.
++ * If not, make sure we get into utrace_resume() where we can
++ * clear the signal_handler flag before resuming.
++ */
++void utrace_signal_handler(struct task_struct *task, int stepping)
++{
++ struct utrace *utrace = task_utrace_struct(task);
++
++ spin_lock(&utrace->lock);
++
++ utrace->signal_handler = 1;
++ if (utrace->resume > UTRACE_INTERRUPT) {
++ if (stepping) {
++ utrace->resume = UTRACE_INTERRUPT;
++ set_tsk_thread_flag(task, TIF_SIGPENDING);
++ } else if (utrace->resume == UTRACE_RESUME) {
++ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME);
++ }
++ }
++
++ spin_unlock(&utrace->lock);
++}
++
++/**
++ * utrace_prepare_examine - prepare to examine thread state
++ * @target: thread of interest, a &struct task_struct pointer
++ * @engine: engine pointer returned by utrace_attach_task()
++ * @exam: temporary state, a &struct utrace_examiner pointer
++ *
++ * This call prepares to safely examine the thread @target using
++ * &struct user_regset calls, or direct access to thread-synchronous fields.
++ *
++ * When @target is current, this call is superfluous. When @target is
++ * another thread, it must be held stopped via %UTRACE_STOP by @engine.
++ *
++ * This call may block the caller until @target stays stopped, so it must
++ * be called only after the caller is sure @target is about to unschedule.
++ * This means a zero return from a utrace_control() call on @engine giving
++ * %UTRACE_STOP, or a report_quiesce() or report_signal() callback to
++ * @engine that used %UTRACE_STOP in its return value.
++ *
++ * Returns -%ESRCH if @target is dead or -%EINVAL if %UTRACE_STOP was
++ * not used. If @target has started running again despite %UTRACE_STOP
++ * (for %SIGKILL or a spurious wakeup), this call returns -%EAGAIN.
++ *
++ * When this call returns zero, it's safe to use &struct user_regset
++ * calls and task_user_regset_view() on @target and to examine some of
++ * its fields directly. When the examination is complete, a
++ * utrace_finish_examine() call must follow to check whether it was
++ * completed safely.
++ */
++int utrace_prepare_examine(struct task_struct *target,
++ struct utrace_engine *engine,
++ struct utrace_examiner *exam)
++{
++ int ret = 0;
++
++ if (unlikely(target == current))
++ return 0;
++
++ rcu_read_lock();
++ if (unlikely(!engine_wants_stop(engine)))
++ ret = -EINVAL;
++ else if (unlikely(target->exit_state))
++ ret = -ESRCH;
++ else {
++ exam->state = target->state;
++ if (unlikely(exam->state == TASK_RUNNING))
++ ret = -EAGAIN;
++ else
++ get_task_struct(target);
++ }
++ rcu_read_unlock();
++
++ if (likely(!ret)) {
++ exam->ncsw = wait_task_inactive(target, exam->state);
++ put_task_struct(target);
++ if (unlikely(!exam->ncsw))
++ ret = -EAGAIN;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(utrace_prepare_examine);
++
++/**
++ * utrace_finish_examine - complete an examination of thread state
++ * @target: thread of interest, a &struct task_struct pointer
++ * @engine: engine pointer returned by utrace_attach_task()
++ * @exam: pointer passed to utrace_prepare_examine() call
++ *
++ * This call completes an examination on the thread @target begun by a
++ * paired utrace_prepare_examine() call with the same arguments that
++ * returned success (zero).
++ *
++ * When @target is current, this call is superfluous. When @target is
++ * another thread, this returns zero if @target has remained unscheduled
++ * since the paired utrace_prepare_examine() call returned zero.
++ *
++ * When this returns an error, any examination done since the paired
++ * utrace_prepare_examine() call is unreliable and the data extracted
++ * should be discarded. The error is -%EINVAL if @engine is not
++ * keeping @target stopped, or -%EAGAIN if @target woke up unexpectedly.
++ */
++int utrace_finish_examine(struct task_struct *target,
++ struct utrace_engine *engine,
++ struct utrace_examiner *exam)
++{
++ int ret = 0;
++
++ if (unlikely(target == current))
++ return 0;
++
++ rcu_read_lock();
++ if (unlikely(!engine_wants_stop(engine)))
++ ret = -EINVAL;
++ else if (unlikely(target->state != exam->state))
++ ret = -EAGAIN;
++ else
++ get_task_struct(target);
++ rcu_read_unlock();
++
++ if (likely(!ret)) {
++ unsigned long ncsw = wait_task_inactive(target, exam->state);
++ if (unlikely(ncsw != exam->ncsw))
++ ret = -EAGAIN;
++ put_task_struct(target);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(utrace_finish_examine);
++
++/*
++ * This is declared in linux/regset.h and defined in machine-dependent
++ * code. We put the export here to ensure no machine forgets it.
++ */
++EXPORT_SYMBOL_GPL(task_user_regset_view);
++
++/*
++ * Called with rcu_read_lock() held.
++ */
++void task_utrace_proc_status(struct seq_file *m, struct task_struct *p)
++{
++ seq_printf(m, "Utrace:\t%lx\n", p->utrace_flags);
++}
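
To make the examine API documented above concrete, here is a minimal sketch of the calling pattern an engine might use while it holds the target stopped with UTRACE_STOP; the variable names, the retry loop, and what gets read between the two calls are assumptions for illustration, not part of the patch:

	struct utrace_examiner exam;
	const struct user_regset_view *view;
	int err;

	do {
		err = utrace_prepare_examine(target, engine, &exam);
		if (err)
			continue;	/* only -EAGAIN re-enters the loop */

		/*
		 * The target is known to stay unscheduled here, so regset
		 * calls and thread-synchronous fields are safe to use.
		 */
		view = task_user_regset_view(target);
		/* ... view->regsets[n].get(...), ->thread fields, etc. ... */

		/* Discard anything read above unless this returns zero. */
		err = utrace_finish_examine(target, engine, &exam);
	} while (err == -EAGAIN);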
diff --git a/freed-ora/current/F-12/linux-2.6-v4l-dvb-add-kworld-a340-support.patch b/freed-ora/current/F-12/linux-2.6-v4l-dvb-add-kworld-a340-support.patch
new file mode 100644
index 000000000..196e4da1c
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-v4l-dvb-add-kworld-a340-support.patch
@@ -0,0 +1,151 @@
+[PATCH] dvb: add support for kworld 340u and ub435-q to em28xx-dvb
+
+This adds support for the KWorld PlusTV 340U and KWorld UB435-Q ATSC
+sticks, which are really the same device. The sticks have an eMPIA
+em2870 usb bridge chipset, an LG Electronics LGDT3304 ATSC/QAM
+demodulator and an NXP TDA18271HD tuner -- early versions of the 340U
+have a TDA18271HD/C1, later models and the UB435-Q have a C2.
+
+The stick has been tested successfully with both VSB_8 and QAM_256 signals.
+It uses lgdt3304 support added to the lgdt3305 driver by a prior patch,
+rather than the current lgdt3304 driver, as the latter is severely lacking
+in functionality by comparison (see said patch for details).
+
+Signed-off-by: Jarod Wilson <jarod@redhat.com>
+
+---
+Index: linux-2.6.32.noarch/drivers/media/video/em28xx/em28xx-cards.c
+===================================================================
+--- linux-2.6.32.noarch.orig/drivers/media/video/em28xx/em28xx-cards.c
++++ linux-2.6.32.noarch/drivers/media/video/em28xx/em28xx-cards.c
+@@ -157,6 +157,22 @@ static struct em28xx_reg_seq evga_indtub
+ { -1, -1, -1, -1},
+ };
+
++/*
++ * KWorld PlusTV 340U and UB435-Q (ATSC) GPIOs map:
++ * EM_GPIO_0 - currently unknown
++ * EM_GPIO_1 - LED disable/enable (1 = off, 0 = on)
++ * EM_GPIO_2 - currently unknown
++ * EM_GPIO_3 - currently unknown
++ * EM_GPIO_4 - TDA18271HD/C1 tuner (1 = active, 0 = in reset)
++ * EM_GPIO_5 - LGDT3304 ATSC/QAM demod (1 = active, 0 = in reset)
++ * EM_GPIO_6 - currently unknown
++ * EM_GPIO_7 - currently unknown
++ */
++static struct em28xx_reg_seq kworld_a340_digital[] = {
++ {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10},
++ { -1, -1, -1, -1},
++};
++
+ /* Pinnacle Hybrid Pro eb1a:2881 */
+ static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = {
+ {EM28XX_R08_GPIO, 0xfd, ~EM_GPIO_4, 10},
+@@ -1587,6 +1603,16 @@ struct em28xx_board em28xx_boards[] = {
+ .tuner_gpio = reddo_dvb_c_usb_box,
+ .has_dvb = 1,
+ },
++ /* 1b80:a340 - Empia EM2870, NXP TDA18271HD and LG DT3304, sold
++ * initially as the KWorld PlusTV 340U, then as the UB435-Q.
++ * Early variants have a TDA18271HD/C1, later ones a TDA18271HD/C2 */
++ [EM2870_BOARD_KWORLD_A340] = {
++ .name = "KWorld PlusTV 340U or UB435-Q (ATSC)",
++ .tuner_type = TUNER_ABSENT, /* Digital-only TDA18271HD */
++ .has_dvb = 1,
++ .dvb_gpio = kworld_a340_digital,
++ .tuner_gpio = default_tuner_gpio,
++ },
+ };
+ const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
+
+@@ -1704,6 +1730,8 @@ struct usb_device_id em28xx_id_table[] =
+ .driver_info = EM2820_BOARD_IODATA_GVMVP_SZ },
+ { USB_DEVICE(0xeb1a, 0x50a6),
+ .driver_info = EM2860_BOARD_GADMEI_UTV330 },
++ { USB_DEVICE(0x1b80, 0xa340),
++ .driver_info = EM2870_BOARD_KWORLD_A340 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, em28xx_id_table);
+Index: linux-2.6.32.noarch/drivers/media/video/em28xx/em28xx-dvb.c
+===================================================================
+--- linux-2.6.32.noarch.orig/drivers/media/video/em28xx/em28xx-dvb.c
++++ linux-2.6.32.noarch/drivers/media/video/em28xx/em28xx-dvb.c
+@@ -29,11 +29,13 @@
+ #include "tuner-simple.h"
+
+ #include "lgdt330x.h"
++#include "lgdt3305.h"
+ #include "zl10353.h"
+ #include "s5h1409.h"
+ #include "mt352.h"
+ #include "mt352_priv.h" /* FIXME */
+ #include "tda1002x.h"
++#include "tda18271.h"
+
+ MODULE_DESCRIPTION("driver for em28xx based DVB cards");
+ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+@@ -230,6 +232,18 @@ static struct lgdt330x_config em2880_lgd
+ .demod_chip = LGDT3303,
+ };
+
++static struct lgdt3305_config em2870_lgdt3304_dev = {
++ .i2c_addr = 0x0e,
++ .demod_chip = LGDT3304,
++ .spectral_inversion = 1,
++ .deny_i2c_rptr = 1,
++ .mpeg_mode = LGDT3305_MPEG_PARALLEL,
++ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
++ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
++ .vsb_if_khz = 3250,
++ .qam_if_khz = 4000,
++};
++
+ static struct zl10353_config em28xx_zl10353_with_xc3028 = {
+ .demod_address = (0x1e >> 1),
+ .no_tuner = 1,
+@@ -246,6 +260,17 @@ static struct s5h1409_config em28xx_s5h1
+ .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
+ };
+
++static struct tda18271_std_map kworld_a340_std_map = {
++ .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 0,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++ .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 1,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++};
++
++static struct tda18271_config kworld_a340_config = {
++ .std_map = &kworld_a340_std_map,
++};
++
+ static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = {
+ .demod_address = (0x1e >> 1),
+ .no_tuner = 1,
+@@ -568,6 +593,14 @@ static int dvb_init(struct em28xx *dev)
+ }
+ }
+ break;
++ case EM2870_BOARD_KWORLD_A340:
++ dvb->frontend = dvb_attach(lgdt3305_attach,
++ &em2870_lgdt3304_dev,
++ &dev->i2c_adap);
++ if (dvb->frontend != NULL)
++ dvb_attach(tda18271_attach, dvb->frontend, 0x60,
++ &dev->i2c_adap, &kworld_a340_config);
++ break;
+ default:
+ printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
+ " isn't supported yet\n",
+Index: linux-2.6.32.noarch/drivers/media/video/em28xx/em28xx.h
+===================================================================
+--- linux-2.6.32.noarch.orig/drivers/media/video/em28xx/em28xx.h
++++ linux-2.6.32.noarch/drivers/media/video/em28xx/em28xx.h
+@@ -110,6 +110,7 @@
+ #define EM2820_BOARD_SILVERCREST_WEBCAM 71
+ #define EM2861_BOARD_GADMEI_UTV330PLUS 72
+ #define EM2870_BOARD_REDDO_DVB_C_USB_BOX 73
++#define EM2870_BOARD_KWORLD_A340 76
+
+ /* Limits minimum and default number of buffers */
+ #define EM28XX_MIN_BUF 4
diff --git a/freed-ora/current/F-12/linux-2.6-v4l-dvb-add-lgdt3304-support.patch b/freed-ora/current/F-12/linux-2.6-v4l-dvb-add-lgdt3304-support.patch
new file mode 100644
index 000000000..30c50434f
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-v4l-dvb-add-lgdt3304-support.patch
@@ -0,0 +1,350 @@
+From b71e18093e2e7f240797875c50c49552722f8825 Mon Sep 17 00:00:00 2001
+From: Jarod Wilson <jarod@redhat.com>
+Date: Mon, 15 Feb 2010 17:13:25 -0500
+Subject: [PATCH 1/2] dvb: add lgdt3304 support to lgdt3305 driver
+
+There's a currently-unused lgdt3304 demod driver, which leaves a lot to
+be desired as far as functionality. The 3304 is unsurprisingly quite
+similar to the 3305, and empirical testing yields far better results
+and more complete functionality by merging 3304 support into the 3305
+driver. (For example, the current lgdt3304 driver lacks support for
+signal strength, snr, ucblocks, etc., which we get w/the lgdt3305).
+
+For the moment, not dropping the lgdt3304 driver, and it's still up to
+a given device's config setup to choose which demod driver to use, but
+I'd suggest dropping the 3304 driver entirely.
+
+As a follow-up to this patch, I've got another patch that adds support
+for the KWorld PlusTV 340U (ATSC) em2870-based tuner stick, driving
+its lgdt3304 demod via this lgdt3305 driver, which is what I used to
+successfully test this patch with both VSB_8 and QAM_256 signals.
+
+A few pieces are still a touch crude, but I think it's a solid start,
+as well as much cleaner and more feature-complete than the existing
+lgdt3304 driver.
+
+Signed-off-by: Jarod Wilson <jarod@redhat.com>
+---
+ drivers/media/dvb/frontends/lgdt3305.c | 206 ++++++++++++++++++++++++++++++--
+ drivers/media/dvb/frontends/lgdt3305.h | 6 +
+ 2 files changed, 203 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/media/dvb/frontends/lgdt3305.c b/drivers/media/dvb/frontends/lgdt3305.c
+index fde8c59..40695e6 100644
+--- a/drivers/media/dvb/frontends/lgdt3305.c
++++ b/drivers/media/dvb/frontends/lgdt3305.c
+@@ -1,5 +1,5 @@
+ /*
+- * Support for LGDT3305 - VSB/QAM
++ * Support for LG Electronics LGDT3304 and LGDT3305 - VSB/QAM
+ *
+ * Copyright (C) 2008, 2009 Michael Krufky <mkrufky@linuxtv.org>
+ *
+@@ -357,7 +357,10 @@ static int lgdt3305_rfagc_loop(struct lgdt3305_state *state,
+ case QAM_256:
+ agcdelay = 0x046b;
+ rfbw = 0x8889;
+- ifbw = 0x8888;
++ if (state->cfg->demod_chip == LGDT3305)
++ ifbw = 0x8888;
++ else
++ ifbw = 0x6666;
+ break;
+ default:
+ return -EINVAL;
+@@ -409,8 +412,18 @@ static int lgdt3305_agc_setup(struct lgdt3305_state *state,
+ lg_dbg("lockdten = %d, acqen = %d\n", lockdten, acqen);
+
+ /* control agc function */
+- lgdt3305_write_reg(state, LGDT3305_AGC_CTRL_4, 0xe1 | lockdten << 1);
+- lgdt3305_set_reg_bit(state, LGDT3305_AGC_CTRL_1, 2, acqen);
++ switch (state->cfg->demod_chip) {
++ case LGDT3304:
++ lgdt3305_write_reg(state, 0x0314, 0xe1 | lockdten << 1);
++ lgdt3305_set_reg_bit(state, 0x030e, 2, acqen);
++ break;
++ case LGDT3305:
++ lgdt3305_write_reg(state, LGDT3305_AGC_CTRL_4, 0xe1 | lockdten << 1);
++ lgdt3305_set_reg_bit(state, LGDT3305_AGC_CTRL_1, 2, acqen);
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ return lgdt3305_rfagc_loop(state, param);
+ }
+@@ -543,6 +556,11 @@ static int lgdt3305_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+ enable ? 0 : 1);
+ }
+
++static int lgdt3304_sleep(struct dvb_frontend *fe)
++{
++ return 0;
++}
++
+ static int lgdt3305_sleep(struct dvb_frontend *fe)
+ {
+ struct lgdt3305_state *state = fe->demodulator_priv;
+@@ -571,6 +589,55 @@ static int lgdt3305_sleep(struct dvb_frontend *fe)
+ return 0;
+ }
+
++static int lgdt3304_init(struct dvb_frontend *fe)
++{
++ struct lgdt3305_state *state = fe->demodulator_priv;
++ int ret;
++
++ static struct lgdt3305_reg lgdt3304_init_data[] = {
++ { .reg = LGDT3305_GEN_CTRL_1, .val = 0x03, },
++ { .reg = 0x000d, .val = 0x02, },
++ { .reg = 0x000e, .val = 0x02, },
++ { .reg = LGDT3305_DGTL_AGC_REF_1, .val = 0x32, },
++ { .reg = LGDT3305_DGTL_AGC_REF_2, .val = 0xc4, },
++ { .reg = LGDT3305_CR_CTR_FREQ_1, .val = 0x00, },
++ { .reg = LGDT3305_CR_CTR_FREQ_2, .val = 0x00, },
++ { .reg = LGDT3305_CR_CTR_FREQ_3, .val = 0x00, },
++ { .reg = LGDT3305_CR_CTR_FREQ_4, .val = 0x00, },
++ { .reg = LGDT3305_CR_CTRL_7, .val = 0xf9, },
++ { .reg = 0x0112, .val = 0x17, },
++ { .reg = 0x0113, .val = 0x15, },
++ { .reg = 0x0114, .val = 0x18, },
++ { .reg = 0x0115, .val = 0xff, },
++ { .reg = 0x0116, .val = 0x3c, },
++ { .reg = 0x0214, .val = 0x67, },
++ { .reg = 0x0424, .val = 0x8d, },
++ { .reg = 0x0427, .val = 0x12, },
++ { .reg = 0x0428, .val = 0x4f, },
++ { .reg = LGDT3305_IFBW_1, .val = 0x80, },
++ { .reg = LGDT3305_IFBW_2, .val = 0x00, },
++ { .reg = 0x030a, .val = 0x08, },
++ { .reg = 0x030b, .val = 0x9b, },
++ { .reg = 0x030d, .val = 0x00, },
++ { .reg = 0x030e, .val = 0x1c, },
++ { .reg = 0x0314, .val = 0xe1, },
++ { .reg = 0x000d, .val = 0x82, },
++ { .reg = LGDT3305_TP_CTRL_1, .val = 0x5b, },
++ { .reg = LGDT3305_TP_CTRL_1, .val = 0x5b, },
++ };
++
++ lg_dbg("\n");
++
++ ret = lgdt3305_write_regs(state, lgdt3304_init_data,
++ ARRAY_SIZE(lgdt3304_init_data));
++ if (lg_fail(ret))
++ goto fail;
++
++ ret = lgdt3305_soft_reset(state);
++fail:
++ return ret;
++}
++
+ static int lgdt3305_init(struct dvb_frontend *fe)
+ {
+ struct lgdt3305_state *state = fe->demodulator_priv;
+@@ -639,6 +706,88 @@ fail:
+ return ret;
+ }
+
++static int lgdt3304_set_parameters(struct dvb_frontend *fe,
++ struct dvb_frontend_parameters *param)
++{
++ struct lgdt3305_state *state = fe->demodulator_priv;
++ int ret;
++
++ lg_dbg("(%d, %d)\n", param->frequency, param->u.vsb.modulation);
++
++ if (fe->ops.tuner_ops.set_params) {
++ ret = fe->ops.tuner_ops.set_params(fe, param);
++ if (fe->ops.i2c_gate_ctrl)
++ fe->ops.i2c_gate_ctrl(fe, 0);
++ if (lg_fail(ret))
++ goto fail;
++ state->current_frequency = param->frequency;
++ }
++
++ ret = lgdt3305_set_modulation(state, param);
++ if (lg_fail(ret))
++ goto fail;
++
++ ret = lgdt3305_passband_digital_agc(state, param);
++ if (lg_fail(ret))
++ goto fail;
++
++ ret = lgdt3305_agc_setup(state, param);
++ if (lg_fail(ret))
++ goto fail;
++
++ /* reg 0x030d is 3304-only... seen in vsb and qam usbsnoops... */
++ switch (param->u.vsb.modulation) {
++ case VSB_8:
++ lgdt3305_write_reg(state, 0x030d, 0x00);
++#if 1
++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_1, 0x4f);
++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_2, 0x0c);
++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_3, 0xac);
++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_4, 0xba);
++#endif
++ break;
++ case QAM_64:
++ case QAM_256:
++ lgdt3305_write_reg(state, 0x030d, 0x14);
++#if 1
++ ret = lgdt3305_set_if(state, param);
++ if (lg_fail(ret))
++ goto fail;
++#endif
++ break;
++ default:
++ return -EINVAL;
++ }
++
++#if 0
++ /* the set_if vsb formula doesn't work for the 3304, we end up sending
++ * 0x40851e07 instead of 0x4f0cacba (which works back to 94050, rather
++ * than 3250, in the case of the kworld 340u) */
++ ret = lgdt3305_set_if(state, param);
++ if (lg_fail(ret))
++ goto fail;
++#endif
++
++ ret = lgdt3305_spectral_inversion(state, param,
++ state->cfg->spectral_inversion
++ ? 1 : 0);
++ if (lg_fail(ret))
++ goto fail;
++
++ state->current_modulation = param->u.vsb.modulation;
++
++ ret = lgdt3305_mpeg_mode(state, state->cfg->mpeg_mode);
++ if (lg_fail(ret))
++ goto fail;
++
++ /* lgdt3305_mpeg_mode_polarity calls lgdt3305_soft_reset */
++ ret = lgdt3305_mpeg_mode_polarity(state,
++ state->cfg->tpclk_edge,
++ state->cfg->tpvalid_polarity);
++fail:
++ return ret;
++}
++
+ static int lgdt3305_set_parameters(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *param)
+ {
+@@ -847,6 +996,10 @@ static int lgdt3305_read_status(struct dvb_frontend *fe, fe_status_t *status)
+ switch (state->current_modulation) {
+ case QAM_256:
+ case QAM_64:
++#if 0 /* needed w/3304 to set FE_HAS_SIGNAL */
++ if (cr_lock)
++ *status |= FE_HAS_SIGNAL;
++#endif
+ ret = lgdt3305_read_fec_lock_status(state, &fec_lock);
+ if (lg_fail(ret))
+ goto fail;
+@@ -992,6 +1145,7 @@ static void lgdt3305_release(struct dvb_frontend *fe)
+ kfree(state);
+ }
+
++static struct dvb_frontend_ops lgdt3304_ops;
+ static struct dvb_frontend_ops lgdt3305_ops;
+
+ struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
+@@ -1012,11 +1166,21 @@ struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
+ state->cfg = config;
+ state->i2c_adap = i2c_adap;
+
+- memcpy(&state->frontend.ops, &lgdt3305_ops,
+- sizeof(struct dvb_frontend_ops));
++ switch (config->demod_chip) {
++ case LGDT3304:
++ memcpy(&state->frontend.ops, &lgdt3304_ops,
++ sizeof(struct dvb_frontend_ops));
++ break;
++ case LGDT3305:
++ memcpy(&state->frontend.ops, &lgdt3305_ops,
++ sizeof(struct dvb_frontend_ops));
++ break;
++ default:
++ goto fail;
++ }
+ state->frontend.demodulator_priv = state;
+
+- /* verify that we're talking to a lg dt3305 */
++ /* verify that we're talking to a lg dt3304/5 */
+ ret = lgdt3305_read_reg(state, LGDT3305_GEN_CTRL_2, &val);
+ if ((lg_fail(ret)) | (val == 0))
+ goto fail;
+@@ -1035,12 +1199,36 @@ struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
+
+ return &state->frontend;
+ fail:
+- lg_warn("unable to detect LGDT3305 hardware\n");
++ lg_warn("unable to detect %s hardware\n",
++ config->demod_chip ? "LGDT3304" : "LGDT3305");
+ kfree(state);
+ return NULL;
+ }
+ EXPORT_SYMBOL(lgdt3305_attach);
+
++static struct dvb_frontend_ops lgdt3304_ops = {
++ .info = {
++ .name = "LG Electronics LGDT3304 VSB/QAM Frontend",
++ .type = FE_ATSC,
++ .frequency_min = 54000000,
++ .frequency_max = 858000000,
++ .frequency_stepsize = 62500,
++ .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
++ },
++ .i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl,
++ .init = lgdt3304_init,
++ .sleep = lgdt3304_sleep,
++ .set_frontend = lgdt3304_set_parameters,
++ .get_frontend = lgdt3305_get_frontend,
++ .get_tune_settings = lgdt3305_get_tune_settings,
++ .read_status = lgdt3305_read_status,
++ .read_ber = lgdt3305_read_ber,
++ .read_signal_strength = lgdt3305_read_signal_strength,
++ .read_snr = lgdt3305_read_snr,
++ .read_ucblocks = lgdt3305_read_ucblocks,
++ .release = lgdt3305_release,
++};
++
+ static struct dvb_frontend_ops lgdt3305_ops = {
+ .info = {
+ .name = "LG Electronics LGDT3305 VSB/QAM Frontend",
+@@ -1064,7 +1252,7 @@ static struct dvb_frontend_ops lgdt3305_ops = {
+ .release = lgdt3305_release,
+ };
+
+-MODULE_DESCRIPTION("LG Electronics LGDT3305 ATSC/QAM-B Demodulator Driver");
++MODULE_DESCRIPTION("LG Electronics LGDT3304/5 ATSC/QAM-B Demodulator Driver");
+ MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION("0.1");
+diff --git a/drivers/media/dvb/frontends/lgdt3305.h b/drivers/media/dvb/frontends/lgdt3305.h
+index 9cb11c9..a7f30c2 100644
+--- a/drivers/media/dvb/frontends/lgdt3305.h
++++ b/drivers/media/dvb/frontends/lgdt3305.h
+@@ -41,6 +41,11 @@ enum lgdt3305_tp_valid_polarity {
+ LGDT3305_TP_VALID_HIGH = 1,
+ };
+
++enum lgdt_demod_chip_type {
++ LGDT3305 = 0,
++ LGDT3304 = 1,
++};
++
+ struct lgdt3305_config {
+ u8 i2c_addr;
+
+@@ -65,6 +70,7 @@ struct lgdt3305_config {
+ enum lgdt3305_mpeg_mode mpeg_mode;
+ enum lgdt3305_tp_clock_edge tpclk_edge;
+ enum lgdt3305_tp_valid_polarity tpvalid_polarity;
++ enum lgdt_demod_chip_type demod_chip;
+ };
+
+ #if defined(CONFIG_DVB_LGDT3305) || (defined(CONFIG_DVB_LGDT3305_MODULE) && \
+--
+1.6.6
+
diff --git a/freed-ora/current/F-12/linux-2.6-v4l-dvb-update.patch b/freed-ora/current/F-12/linux-2.6-v4l-dvb-update.patch
new file mode 100644
index 000000000..814bff14b
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-v4l-dvb-update.patch
@@ -0,0 +1,366 @@
+Mauro Carvalho Chehab (1):
+ Merge branch 'next' of ../devel into Fedora
+
+Uri Shkolnik (1):
+ V4L/DVB (11241): Siano: SDIO interface driver - remove two redundant lines
+
+diff --git a/linux/drivers/media/dvb/siano/smssdio.c b/linux/drivers/media/dvb/siano/smssdio.c
+new file mode 100644
+index 0000000..4f8fa59
+--- /dev/null
++++ b/linux/drivers/media/dvb/siano/smssdio.c
+@@ -0,0 +1,354 @@
++/*
++ * smssdio.c - Siano 1xxx SDIO interface driver
++ *
++ * Copyright 2008 Pierre Ossman
++ *
++ * Based on code by Siano Mobile Silicon, Inc.,
++ * Copyright (C) 2006-2008, Uri Shkolnik
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ *
++ * This hardware is a bit odd in that all transfers should be done
++ * to/from the SMSSDIO_DATA register, yet the "increase address" bit
++ * always needs to be set.
++ *
++ * Also, buffers from the card are always aligned to 128 byte
++ * boundaries.
++ */
++
++/*
++ * General cleanup notes:
++ *
++ * - only typedefs should be named *_t
++ *
++ * - use ERR_PTR and friends for smscore_register_device()
++ *
++ * - smscore_getbuffer should zero fields
++ *
++ * Fix stop command
++ */
++
++#include <linux/moduleparam.h>
++#include <linux/firmware.h>
++#include <linux/delay.h>
++#include <linux/mmc/card.h>
++#include <linux/mmc/sdio_func.h>
++#include <linux/mmc/sdio_ids.h>
++
++#include "smscoreapi.h"
++#include "sms-cards.h"
++
++/* Registers */
++
++#define SMSSDIO_DATA 0x00
++#define SMSSDIO_INT 0x04
++
++static const struct sdio_device_id smssdio_ids[] = {
++ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
++ .driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
++ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
++ .driver_data = SMS1XXX_BOARD_SIANO_NOVA_A},
++ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_B0),
++ .driver_data = SMS1XXX_BOARD_SIANO_NOVA_B},
++ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VEGA_A0),
++ .driver_data = SMS1XXX_BOARD_SIANO_VEGA},
++ {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE),
++ .driver_data = SMS1XXX_BOARD_SIANO_VEGA},
++ { /* end: all zeroes */ },
++};
++
++MODULE_DEVICE_TABLE(sdio, smssdio_ids);
++
++struct smssdio_device {
++ struct sdio_func *func;
++
++ struct smscore_device_t *coredev;
++
++ struct smscore_buffer_t *split_cb;
++};
++
++/*******************************************************************/
++/* Siano core callbacks */
++/*******************************************************************/
++
++static int smssdio_sendrequest(void *context, void *buffer, size_t size)
++{
++ int ret;
++ struct smssdio_device *smsdev;
++
++ smsdev = context;
++
++ sdio_claim_host(smsdev->func);
++
++ while (size >= smsdev->func->cur_blksize) {
++ ret = sdio_write_blocks(smsdev->func, SMSSDIO_DATA, buffer, 1);
++ if (ret)
++ goto out;
++
++ buffer += smsdev->func->cur_blksize;
++ size -= smsdev->func->cur_blksize;
++ }
++
++ if (size) {
++ ret = sdio_write_bytes(smsdev->func, SMSSDIO_DATA,
++ buffer, size);
++ }
++
++out:
++ sdio_release_host(smsdev->func);
++
++ return ret;
++}
++
++/*******************************************************************/
++/* SDIO callbacks */
++/*******************************************************************/
++
++static void smssdio_interrupt(struct sdio_func *func)
++{
++ int ret, isr;
++
++ struct smssdio_device *smsdev;
++ struct smscore_buffer_t *cb;
++ struct SmsMsgHdr_ST *hdr;
++ size_t size;
++
++ smsdev = sdio_get_drvdata(func);
++
++ /*
++ * The interrupt register has no defined meaning. It is just
++ * a way of turning off the level triggered interrupt.
++ */
++ isr = sdio_readb(func, SMSSDIO_INT, &ret);
++ if (ret) {
++ dev_err(&smsdev->func->dev,
++ "Unable to read interrupt register!\n");
++ return;
++ }
++
++ if (smsdev->split_cb == NULL) {
++ cb = smscore_getbuffer(smsdev->coredev);
++ if (!cb) {
++ dev_err(&smsdev->func->dev,
++ "Unable to allocate data buffer!\n");
++ return;
++ }
++
++ ret = sdio_read_blocks(smsdev->func, cb->p, SMSSDIO_DATA, 1);
++ if (ret) {
++ dev_err(&smsdev->func->dev,
++ "Error %d reading initial block!\n", ret);
++ return;
++ }
++
++ hdr = cb->p;
++
++ if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) {
++ smsdev->split_cb = cb;
++ return;
++ }
++
++ size = hdr->msgLength - smsdev->func->cur_blksize;
++ } else {
++ cb = smsdev->split_cb;
++ hdr = cb->p;
++
++ size = hdr->msgLength - sizeof(struct SmsMsgHdr_ST);
++
++ smsdev->split_cb = NULL;
++ }
++
++ if (hdr->msgLength > smsdev->func->cur_blksize) {
++ void *buffer;
++
++ size = ALIGN(size, 128);
++ buffer = cb->p + hdr->msgLength;
++
++ BUG_ON(smsdev->func->cur_blksize != 128);
++
++ /*
++ * First attempt to transfer all of it in one go...
++ */
++ ret = sdio_read_blocks(smsdev->func, buffer,
++ SMSSDIO_DATA, size / 128);
++ if (ret && ret != -EINVAL) {
++ smscore_putbuffer(smsdev->coredev, cb);
++ dev_err(&smsdev->func->dev,
++ "Error %d reading data from card!\n", ret);
++ return;
++ }
++
++ /*
++ * ..then fall back to one block at a time if that is
++ * not possible...
++ *
++ * (we have to do this manually because of the
++ * problem with the "increase address" bit)
++ */
++ if (ret == -EINVAL) {
++ while (size) {
++ ret = sdio_read_blocks(smsdev->func,
++ buffer, SMSSDIO_DATA, 1);
++ if (ret) {
++ smscore_putbuffer(smsdev->coredev, cb);
++ dev_err(&smsdev->func->dev,
++ "Error %d reading "
++ "data from card!\n", ret);
++ return;
++ }
++
++ buffer += smsdev->func->cur_blksize;
++ if (size > smsdev->func->cur_blksize)
++ size -= smsdev->func->cur_blksize;
++ else
++ size = 0;
++ }
++ }
++ }
++
++ cb->size = hdr->msgLength;
++ cb->offset = 0;
++
++ smscore_onresponse(smsdev->coredev, cb);
++}
++
++static int smssdio_probe(struct sdio_func *func,
++ const struct sdio_device_id *id)
++{
++ int ret;
++
++ int board_id;
++ struct smssdio_device *smsdev;
++ struct smsdevice_params_t params;
++
++ board_id = id->driver_data;
++
++ smsdev = kzalloc(sizeof(struct smssdio_device), GFP_KERNEL);
++ if (!smsdev)
++ return -ENOMEM;
++
++ smsdev->func = func;
++
++ memset(&params, 0, sizeof(struct smsdevice_params_t));
++
++ params.device = &func->dev;
++ params.buffer_size = 0x5000; /* ?? */
++ params.num_buffers = 22; /* ?? */
++ params.context = smsdev;
++
++ snprintf(params.devpath, sizeof(params.devpath),
++ "sdio\\%s", sdio_func_id(func));
++
++ params.sendrequest_handler = smssdio_sendrequest;
++
++ params.device_type = sms_get_board(board_id)->type;
++
++ if (params.device_type != SMS_STELLAR)
++ params.flags |= SMS_DEVICE_FAMILY2;
++ else {
++ /*
++ * FIXME: Stellar needs special handling...
++ */
++ ret = -ENODEV;
++ goto free;
++ }
++
++ ret = smscore_register_device(&params, &smsdev->coredev);
++ if (ret < 0)
++ goto free;
++
++ smscore_set_board_id(smsdev->coredev, board_id);
++
++ sdio_claim_host(func);
++
++ ret = sdio_enable_func(func);
++ if (ret)
++ goto release;
++
++ ret = sdio_set_block_size(func, 128);
++ if (ret)
++ goto disable;
++
++ ret = sdio_claim_irq(func, smssdio_interrupt);
++ if (ret)
++ goto disable;
++
++ sdio_set_drvdata(func, smsdev);
++
++ sdio_release_host(func);
++
++ ret = smscore_start_device(smsdev->coredev);
++ if (ret < 0)
++ goto reclaim;
++
++ return 0;
++
++reclaim:
++ sdio_claim_host(func);
++ sdio_release_irq(func);
++disable:
++ sdio_disable_func(func);
++release:
++ sdio_release_host(func);
++ smscore_unregister_device(smsdev->coredev);
++free:
++ kfree(smsdev);
++
++ return ret;
++}
++
++static void smssdio_remove(struct sdio_func *func)
++{
++ struct smssdio_device *smsdev;
++
++ smsdev = sdio_get_drvdata(func);
++
++ /* FIXME: racy! */
++ if (smsdev->split_cb)
++ smscore_putbuffer(smsdev->coredev, smsdev->split_cb);
++
++ smscore_unregister_device(smsdev->coredev);
++
++ sdio_claim_host(func);
++ sdio_release_irq(func);
++ sdio_disable_func(func);
++ sdio_release_host(func);
++
++ kfree(smsdev);
++}
++
++static struct sdio_driver smssdio_driver = {
++ .name = "smssdio",
++ .id_table = smssdio_ids,
++ .probe = smssdio_probe,
++ .remove = smssdio_remove,
++};
++
++/*******************************************************************/
++/* Module functions */
++/*******************************************************************/
++
++int smssdio_register(void)
++{
++ int ret = 0;
++
++ printk(KERN_INFO "smssdio: Siano SMS1xxx SDIO driver\n");
++ printk(KERN_INFO "smssdio: Copyright Pierre Ossman\n");
++
++ ret = sdio_register_driver(&smssdio_driver);
++
++ return ret;
++}
++
++void smssdio_unregister(void)
++{
++ sdio_unregister_driver(&smssdio_driver);
++}
++
++MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver");
++MODULE_AUTHOR("Pierre Ossman");
++MODULE_LICENSE("GPL");
diff --git a/freed-ora/current/F-12/linux-2.6-vio-modalias.patch b/freed-ora/current/F-12/linux-2.6-vio-modalias.patch
new file mode 100644
index 000000000..057eac781
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-vio-modalias.patch
@@ -0,0 +1,32 @@
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index f988672..12a0851 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -294,9 +294,27 @@ static ssize_t devspec_show(struct device *dev,
+ return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
+ }
+
++static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ const struct vio_dev *vio_dev = to_vio_dev(dev);
++ struct device_node *dn;
++ const char *cp;
++
++ dn = dev->archdata.of_node;
++ if (!dn)
++ return -ENODEV;
++ cp = of_get_property(dn, "compatible", NULL);
++ if (!cp)
++ return -ENODEV;
++
++ return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
++}
++
+ static struct device_attribute vio_dev_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(devspec),
++ __ATTR_RO(modalias),
+ __ATTR_NULL
+ };
+
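
The new attribute exists so that vio devices can be autoloaded from userspace: the "vio:T<type>S<compatible>" string printed above is matched against aliases generated from a driver's device table. As a rough illustration (the table entries below are hypothetical, not taken from this patch):

	static const struct vio_device_id example_vio_ids[] = {
		{ "network", "IBM,l-lan" },	/* hypothetical type/compat pair */
		{ "", "" },
	};
	MODULE_DEVICE_TABLE(vio, example_vio_ids);

	/*
	 * With modalias exposed as above, udev sees something like
	 * "vio:TnetworkSIBM,l-lan" for such a device and can modprobe
	 * the driver that declares a matching alias.
	 */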
diff --git a/freed-ora/current/F-12/linux-2.6-x86-64-fbdev-primary.patch b/freed-ora/current/F-12/linux-2.6-x86-64-fbdev-primary.patch
new file mode 100644
index 000000000..b35096fa1
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6-x86-64-fbdev-primary.patch
@@ -0,0 +1,49 @@
+From cdd54d73203838f249291988d5f79e40fee00a05 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Thu, 7 Jan 2010 16:59:06 +1000
+Subject: [PATCH] x86: allow fbdev primary video code on 64-bit.
+
+For some reason the 64-bit tree was doing this differently and
+I can't see why it would need to.
+
+This corrects behaviour when you have two GPUs plugged in, where
+32-bit put the console in one place and 64-bit in another.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+---
+ arch/x86/Makefile | 2 --
+ arch/x86/include/asm/fb.h | 4 ----
+ 2 files changed, 0 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 78b32be..0a43dc5 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -135,9 +135,7 @@ drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/
+ # suspend and hibernation support
+ drivers-$(CONFIG_PM) += arch/x86/power/
+
+-ifeq ($(CONFIG_X86_32),y)
+ drivers-$(CONFIG_FB) += arch/x86/video/
+-endif
+
+ ####
+ # boot loader support. Several targets are kept for legacy purposes
+diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h
+index 5301846..2519d06 100644
+--- a/arch/x86/include/asm/fb.h
++++ b/arch/x86/include/asm/fb.h
+@@ -12,10 +12,6 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+ }
+
+-#ifdef CONFIG_X86_32
+ extern int fb_is_primary_device(struct fb_info *info);
+-#else
+-static inline int fb_is_primary_device(struct fb_info *info) { return 0; }
+-#endif
+
+ #endif /* _ASM_X86_FB_H */
+--
+1.6.5.2
+
diff --git a/freed-ora/current/F-12/linux-2.6.29-sparc-IOC_TYPECHECK.patch b/freed-ora/current/F-12/linux-2.6.29-sparc-IOC_TYPECHECK.patch
new file mode 100644
index 000000000..d73c30adc
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6.29-sparc-IOC_TYPECHECK.patch
@@ -0,0 +1,21 @@
+diff -up vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h.BAD vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h
+--- vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h.BAD 2009-03-09 17:01:32.000000000 -0400
++++ vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h 2009-03-09 16:52:27.000000000 -0400
+@@ -41,6 +41,17 @@
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
++#ifdef __KERNEL__
++/* provoke compile error for invalid uses of size argument */
++extern unsigned int __invalid_size_argument_for_IOC;
++#define _IOC_TYPECHECK(t) \
++ ((sizeof(t) == sizeof(t[1]) && \
++ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
++ sizeof(t) : __invalid_size_argument_for_IOC)
++#else
++#define _IOC_TYPECHECK(t) (sizeof(t))
++#endif
++
+ #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+ #define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+ #define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
diff --git a/freed-ora/current/F-12/linux-2.6.30-hush-rom-warning.patch b/freed-ora/current/F-12/linux-2.6.30-hush-rom-warning.patch
new file mode 100644
index 000000000..a4a0809e5
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6.30-hush-rom-warning.patch
@@ -0,0 +1,27 @@
+diff -up linux-2.6.30.noarch/drivers/pci/setup-res.c.jx linux-2.6.30.noarch/drivers/pci/setup-res.c
+--- linux-2.6.30.noarch/drivers/pci/setup-res.c.jx 2009-07-27 17:56:13.000000000 -0400
++++ linux-2.6.30.noarch/drivers/pci/setup-res.c 2009-07-27 17:58:25.000000000 -0400
+@@ -101,6 +101,7 @@ int pci_claim_resource(struct pci_dev *d
+ struct resource *res = &dev->resource[resource];
+ struct resource *root;
+ int err;
++ const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
+
+ root = pci_find_parent_resource(dev, res);
+
+@@ -108,8 +109,13 @@ int pci_claim_resource(struct pci_dev *d
+ if (root != NULL)
+ err = request_resource(root, res);
+
+- if (err) {
+- const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
++ if (err && resource == 6) {
++ dev_info(&dev->dev, "BAR %d: %s of %s %pR\n",
++ resource,
++ root ? "address space collision on" :
++ "no parent found for",
++ dtype, res);
++ } else if (err) {
+ dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
+ resource,
+ root ? "address space collision on" :
diff --git a/freed-ora/current/F-12/linux-2.6.30-no-pcspkr-modalias.patch b/freed-ora/current/F-12/linux-2.6.30-no-pcspkr-modalias.patch
new file mode 100644
index 000000000..c703b8844
--- /dev/null
+++ b/freed-ora/current/F-12/linux-2.6.30-no-pcspkr-modalias.patch
@@ -0,0 +1,11 @@
+diff -up linux-2.6.30.noarch/drivers/input/misc/pcspkr.c.jx linux-2.6.30.noarch/drivers/input/misc/pcspkr.c
+--- linux-2.6.30.noarch/drivers/input/misc/pcspkr.c.jx 2009-07-28 16:54:44.000000000 -0400
++++ linux-2.6.30.noarch/drivers/input/misc/pcspkr.c 2009-07-28 16:59:36.000000000 -0400
+@@ -23,7 +23,6 @@
+ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
+ MODULE_DESCRIPTION("PC Speaker beeper driver");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform:pcspkr");
+
+ #if defined(CONFIG_MIPS) || defined(CONFIG_X86)
+ /* Use the global PIT lock ! */
diff --git a/freed-ora/current/F-12/mac80211-explicitly-disable-enable-QoS.patch b/freed-ora/current/F-12/mac80211-explicitly-disable-enable-QoS.patch
new file mode 100644
index 000000000..2d5454033
--- /dev/null
+++ b/freed-ora/current/F-12/mac80211-explicitly-disable-enable-QoS.patch
@@ -0,0 +1,358 @@
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+To: kernel@lists.fedoraproject.org, "John W. Linville" <linville@redhat.com>
+Subject: [PATCH 2/4 2.6.32.y] iwlwifi: manage QoS by mac stack
+Date: Fri, 11 Jun 2010 17:03:14 +0200
+
+We activate/deactivate QoS and set up default queue parameters in the
+iwlwifi driver. The mac80211 stack does the same, so we do not need to
+repeat that work here. The stack will also tell us when to disable QoS,
+which fixes the driver when working with older APs that do not have QoS
+implemented.
+
+The patch makes "force = true" in iwl_activate_qos(), assuming we always
+want to do with QoS whatever the mac stack wishes.
+
+The patch also removes the unused qos_cap bits and does not initialize
+qos_active = 0, as kzalloc already initializes it to zero.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+---
+ drivers/net/wireless/iwlwifi/iwl-agn.c | 9 --
+ drivers/net/wireless/iwlwifi/iwl-core.c | 147 +++------------------------
+ drivers/net/wireless/iwlwifi/iwl-core.h | 3 +-
+ drivers/net/wireless/iwlwifi/iwl-dev.h | 21 ----
+ drivers/net/wireless/iwlwifi/iwl3945-base.c | 7 --
+ 5 files changed, 17 insertions(+), 170 deletions(-)
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 921dc4a..b05f198 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -2172,7 +2172,6 @@ void iwl_post_associate(struct iwl_priv *priv)
+ {
+ struct ieee80211_conf *conf = NULL;
+ int ret = 0;
+- unsigned long flags;
+
+ if (priv->iw_mode == NL80211_IFTYPE_AP) {
+ IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
+@@ -2257,10 +2256,6 @@ void iwl_post_associate(struct iwl_priv *priv)
+ if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ priv->assoc_station_added = 1;
+
+- spin_lock_irqsave(&priv->lock, flags);
+- iwl_activate_qos(priv, 0);
+- spin_unlock_irqrestore(&priv->lock, flags);
+-
+ /* the chain noise calibration will enabled PM upon completion
+ * If chain noise has already been run, then we need to enable
+ * power management here */
+@@ -2384,7 +2379,6 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ void iwl_config_ap(struct iwl_priv *priv)
+ {
+ int ret = 0;
+- unsigned long flags;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+@@ -2432,9 +2426,6 @@ void iwl_config_ap(struct iwl_priv *priv)
+ /* restore RXON assoc */
+ priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ iwlcore_commit_rxon(priv);
+- spin_lock_irqsave(&priv->lock, flags);
+- iwl_activate_qos(priv, 1);
+- spin_unlock_irqrestore(&priv->lock, flags);
+ iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
+ }
+ iwl_send_beacon_cmd(priv);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index 4a4f7e4..6ce19ea 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -266,17 +266,13 @@ EXPORT_SYMBOL(iwl_hw_nic_init);
+ /*
+ * QoS support
+ */
+-void iwl_activate_qos(struct iwl_priv *priv, u8 force)
++static void iwl_update_qos(struct iwl_priv *priv)
+ {
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ priv->qos_data.def_qos_parm.qos_flags = 0;
+
+- if (priv->qos_data.qos_cap.q_AP.queue_request &&
+- !priv->qos_data.qos_cap.q_AP.txop_request)
+- priv->qos_data.def_qos_parm.qos_flags |=
+- QOS_PARAM_FLG_TXOP_TYPE_MSK;
+ if (priv->qos_data.qos_active)
+ priv->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+@@ -284,118 +280,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
+ if (priv->current_ht_config.is_ht)
+ priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+- if (force || iwl_is_associated(priv)) {
+- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+- priv->qos_data.qos_active,
+- priv->qos_data.def_qos_parm.qos_flags);
++ IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
++ priv->qos_data.qos_active,
++ priv->qos_data.def_qos_parm.qos_flags);
+
+- iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
+- sizeof(struct iwl_qosparam_cmd),
+- &priv->qos_data.def_qos_parm, NULL);
+- }
++ iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
++ sizeof(struct iwl_qosparam_cmd),
++ &priv->qos_data.def_qos_parm, NULL);
+ }
+-EXPORT_SYMBOL(iwl_activate_qos);
+-
+-/*
+- * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
+- * (802.11b) (802.11a/g)
+- * AC_BK 15 1023 7 0 0
+- * AC_BE 15 1023 3 0 0
+- * AC_VI 7 15 2 6.016ms 3.008ms
+- * AC_VO 3 7 2 3.264ms 1.504ms
+- */
+-void iwl_reset_qos(struct iwl_priv *priv)
+-{
+- u16 cw_min = 15;
+- u16 cw_max = 1023;
+- u8 aifs = 2;
+- bool is_legacy = false;
+- unsigned long flags;
+- int i;
+-
+- spin_lock_irqsave(&priv->lock, flags);
+- /* QoS always active in AP and ADHOC mode
+- * In STA mode wait for association
+- */
+- if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
+- priv->iw_mode == NL80211_IFTYPE_AP)
+- priv->qos_data.qos_active = 1;
+- else
+- priv->qos_data.qos_active = 0;
+-
+- /* check for legacy mode */
+- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+- (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
+- (priv->iw_mode == NL80211_IFTYPE_STATION &&
+- (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
+- cw_min = 31;
+- is_legacy = 1;
+- }
+-
+- if (priv->qos_data.qos_active)
+- aifs = 3;
+-
+- /* AC_BE */
+- priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
+- priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
+- priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
+- priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
+- priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
+-
+- if (priv->qos_data.qos_active) {
+- /* AC_BK */
+- i = 1;
+- priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
+- priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
+- priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
+- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
+- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
+-
+- /* AC_VI */
+- i = 2;
+- priv->qos_data.def_qos_parm.ac[i].cw_min =
+- cpu_to_le16((cw_min + 1) / 2 - 1);
+- priv->qos_data.def_qos_parm.ac[i].cw_max =
+- cpu_to_le16(cw_min);
+- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
+- if (is_legacy)
+- priv->qos_data.def_qos_parm.ac[i].edca_txop =
+- cpu_to_le16(6016);
+- else
+- priv->qos_data.def_qos_parm.ac[i].edca_txop =
+- cpu_to_le16(3008);
+- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
+-
+- /* AC_VO */
+- i = 3;
+- priv->qos_data.def_qos_parm.ac[i].cw_min =
+- cpu_to_le16((cw_min + 1) / 4 - 1);
+- priv->qos_data.def_qos_parm.ac[i].cw_max =
+- cpu_to_le16((cw_min + 1) / 2 - 1);
+- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
+- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
+- if (is_legacy)
+- priv->qos_data.def_qos_parm.ac[i].edca_txop =
+- cpu_to_le16(3264);
+- else
+- priv->qos_data.def_qos_parm.ac[i].edca_txop =
+- cpu_to_le16(1504);
+- } else {
+- for (i = 1; i < 4; i++) {
+- priv->qos_data.def_qos_parm.ac[i].cw_min =
+- cpu_to_le16(cw_min);
+- priv->qos_data.def_qos_parm.ac[i].cw_max =
+- cpu_to_le16(cw_max);
+- priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
+- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
+- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
+- }
+- }
+- IWL_DEBUG_QOS(priv, "set QoS to default \n");
+-
+- spin_unlock_irqrestore(&priv->lock, flags);
+-}
+-EXPORT_SYMBOL(iwl_reset_qos);
+
+ #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+ #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+@@ -1503,11 +1395,6 @@ int iwl_init_drv(struct iwl_priv *priv)
+
+ iwl_init_scan_params(priv);
+
+- iwl_reset_qos(priv);
+-
+- priv->qos_data.qos_active = 0;
+- priv->qos_data.qos_cap.val = 0;
+-
+ priv->rates_mask = IWL_RATES_MASK;
+ /* Set the tx_power_user_lmt to the lowest power level
+ * this value will get overwritten by channel max power avg
+@@ -2213,12 +2100,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ cpu_to_le16((params->txop * 32));
+
+ priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+- priv->qos_data.qos_active = 1;
+-
+- if (priv->iw_mode == NL80211_IFTYPE_AP)
+- iwl_activate_qos(priv, 1);
+- else if (priv->assoc_id && iwl_is_associated(priv))
+- iwl_activate_qos(priv, 0);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+@@ -2452,11 +2333,8 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+- iwl_reset_qos(priv);
+-
+ priv->cfg->ops->lib->post_associate(priv);
+
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(iwl_mac_beacon_update);
+@@ -2674,6 +2552,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+
++ if (changed & IEEE80211_CONF_CHANGE_QOS) {
++ bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
++
++ spin_lock_irqsave(&priv->lock, flags);
++ priv->qos_data.qos_active = qos_active;
++ iwl_update_qos(priv);
++ spin_unlock_irqrestore(&priv->lock, flags);
++ }
++
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ goto out;
+@@ -2744,8 +2631,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
+ memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+- iwl_reset_qos(priv);
+-
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->assoc_id = 0;
+ priv->assoc_capability = 0;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
+index 40ec0c1..d5000c7 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.h
++++ b/drivers/net/wireless/iwlwifi/iwl-core.h
+@@ -266,8 +266,7 @@ struct iwl_cfg {
+ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
+ struct ieee80211_ops *hw_ops);
+ void iwl_hw_detect(struct iwl_priv *priv);
+-void iwl_reset_qos(struct iwl_priv *priv);
+-void iwl_activate_qos(struct iwl_priv *priv, u8 force);
++void iwl_activate_qos(struct iwl_priv *priv);
+ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
+ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
+index cea2ee2..24faad7 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
+@@ -514,30 +514,9 @@ struct iwl_ht_info {
+ u8 non_GF_STA_present;
+ };
+
+-union iwl_qos_capabity {
+- struct {
+- u8 edca_count:4; /* bit 0-3 */
+- u8 q_ack:1; /* bit 4 */
+- u8 queue_request:1; /* bit 5 */
+- u8 txop_request:1; /* bit 6 */
+- u8 reserved:1; /* bit 7 */
+- } q_AP;
+- struct {
+- u8 acvo_APSD:1; /* bit 0 */
+- u8 acvi_APSD:1; /* bit 1 */
+- u8 ac_bk_APSD:1; /* bit 2 */
+- u8 ac_be_APSD:1; /* bit 3 */
+- u8 q_ack:1; /* bit 4 */
+- u8 max_len:2; /* bit 5-6 */
+- u8 more_data_ack:1; /* bit 7 */
+- } q_STA;
+- u8 val;
+-};
+-
+ /* QoS structures */
+ struct iwl_qos_info {
+ int qos_active;
+- union iwl_qos_capabity qos_cap;
+ struct iwl_qosparam_cmd def_qos_parm;
+ };
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+index 619590d..95447ca 100644
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -3091,8 +3091,6 @@ void iwl3945_post_associate(struct iwl_priv *priv)
+ break;
+ }
+
+- iwl_activate_qos(priv, 0);
+-
+ /* we have just associated, don't start scan too early */
+ priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
+ }
+@@ -3805,11 +3803,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
+
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+
+- iwl_reset_qos(priv);
+-
+- priv->qos_data.qos_active = 0;
+- priv->qos_data.qos_cap.val = 0;
+-
+ priv->rates_mask = IWL_RATES_MASK;
+ priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
+
+--
+1.6.2.5
+
+_______________________________________________
+kernel mailing list
+kernel@lists.fedoraproject.org
+https://admin.fedoraproject.org/mailman/listinfo/kernel
+
diff --git a/freed-ora/current/F-12/merge.pl b/freed-ora/current/F-12/merge.pl
new file mode 100755
index 000000000..8c318156a
--- /dev/null
+++ b/freed-ora/current/F-12/merge.pl
@@ -0,0 +1,66 @@
+#! /usr/bin/perl
+
+my @args=@ARGV;
+my %configvalues;
+my @configoptions;
+my $configcounter = 0;
+
+# optionally print out the architecture as the first line of our output
+my $arch = $args[2];
+if (defined $arch) {
+ print "# $arch\n";
+}
+
+# first, read the override file
+
+open (FILE,"$args[0]") || die "Could not open $args[0]";
+while (<FILE>) {
+ my $str = $_;
+ my $configname;
+
+ if (/\# ([\w]+) is not set/) {
+ $configname = $1;
+ } elsif (/([\w]+)=/) {
+ $configname = $1;
+ }
+
+ if (defined($configname) && !exists($configvalues{$configname})) {
+ $configvalues{$configname} = $str;
+ $configoptions[$configcounter] = $configname;
+ $configcounter ++;
+ }
+};
+
+# now, read and output the entire configfile, except for the overridden
+# parts... for those the new value is printed.
+
+open (FILE2,"$args[1]") || die "Could not open $args[1]";
+while (<FILE2>) {
+ my $configname;
+
+ if (/\# ([\w]+) is not set/) {
+ $configname = $1;
+ } elsif (/([\w]+)=/) {
+ $configname = $1;
+ }
+
+ if (defined($configname) && exists($configvalues{$configname})) {
+ print "$configvalues{$configname}";
+ delete($configvalues{$configname});
+ } else {
+ print "$_";
+ }
+}
+
+# now print the new values from the overridden configfile
+my $counter = 0;
+
+while ($counter < $configcounter) {
+ my $configname = $configoptions[$counter];
+ if (exists($configvalues{$configname})) {
+ print "$configvalues{$configname}";
+ }
+ $counter++;
+}
+
+1;
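
Going by the arguments the script reads, it is invoked roughly as "perl merge.pl <override-file> <base-config> [arch] > merged-config": options set in the override file replace the corresponding lines of the base config, everything else is passed through unchanged, and override-only options are appended at the end (with the architecture, if given, printed as a leading comment).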
diff --git a/freed-ora/current/F-12/mirrors b/freed-ora/current/F-12/mirrors
deleted file mode 100644
index d4d35d639..000000000
--- a/freed-ora/current/F-12/mirrors
+++ /dev/null
@@ -1,6 +0,0 @@
-http://linux-libre.fsfla.org/pub/linux-libre/freed-ora/src/
-http://ftp.kernel.org/pub/linux/kernel/v2.6/snapshots
-http://ftp.kernel.org/pub/linux/kernel/v2.6
-http://ftp.kernel.org/pub/linux/kernel/v2.6/testing
-http://ftp.kernel.org/pub/linux/kernel/v2.6/stable-review
-http://ftp.kernel.org/pub/linux/kernel/v2.6/snapshots/old
diff --git a/freed-ora/current/F-12/patch-libre-2.6.32.16.bz2.sign b/freed-ora/current/F-12/patch-libre-2.6.32.16.bz2.sign
deleted file mode 100644
index 9fd1f7c6e..000000000
--- a/freed-ora/current/F-12/patch-libre-2.6.32.16.bz2.sign
+++ /dev/null
@@ -1,7 +0,0 @@
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v2.0.14 (GNU/Linux)
-
-iEYEABECAAYFAkw0unkACgkQvLfPh359R6ezigCaA7ulnShItsPKlsyGUNnEnUE/
-o44AoKDj12hglCxrkaxffCK9R7Es6jAx
-=gNd4
------END PGP SIGNATURE-----
diff --git a/freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta b/freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta
deleted file mode 100644
index 9f5dd47f7..000000000
--- a/freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta
+++ /dev/null
Binary files differ
diff --git a/freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta.sign b/freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta.sign
deleted file mode 100644
index 1a3f12c50..000000000
--- a/freed-ora/current/F-12/patch-libre-2.6.32.16.xdelta.sign
+++ /dev/null
@@ -1,7 +0,0 @@
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v2.0.14 (GNU/Linux)
-
-iEYEABECAAYFAkw0unYACgkQvLfPh359R6ef1gCcCILszyOgG9YVfmHc59jlapM6
-+oQAn3X3WJYk4drGevGYHOHf6kH0yGjv
-=HZiK
------END PGP SIGNATURE-----
diff --git a/freed-ora/current/F-12/patch-libre-2.6.32.18.bz2.sign b/freed-ora/current/F-12/patch-libre-2.6.32.18.bz2.sign
new file mode 100644
index 000000000..c8c1f5ac5
--- /dev/null
+++ b/freed-ora/current/F-12/patch-libre-2.6.32.18.bz2.sign
@@ -0,0 +1,7 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v2.0.14 (GNU/Linux)
+
+iEYEABECAAYFAkxjeOoACgkQvLfPh359R6diJACfRWmXaRr/urroiB62BC0eSboF
+bl8An3IG37OXASX3l0wcFyNohSax3OHf
+=wWiY
+-----END PGP SIGNATURE-----
diff --git a/freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta b/freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta
new file mode 100644
index 000000000..11bcdeda9
--- /dev/null
+++ b/freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta
Binary files differ
diff --git a/freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta.sign b/freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta.sign
new file mode 100644
index 000000000..a1686efe7
--- /dev/null
+++ b/freed-ora/current/F-12/patch-libre-2.6.32.18.xdelta.sign
@@ -0,0 +1,7 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v2.0.14 (GNU/Linux)
+
+iEYEABECAAYFAkxjeOgACgkQvLfPh359R6cO4ACggfeqi/BPNx3ZoQIb4JuLtmsG
+oYoAoJwc17fkChs1WEZy7dr9OAOseu0x
+=oO9t
+-----END PGP SIGNATURE-----
diff --git a/freed-ora/current/F-12/pci-acpi-disable-aspm-if-no-osc.patch b/freed-ora/current/F-12/pci-acpi-disable-aspm-if-no-osc.patch
new file mode 100644
index 000000000..82f6a9c85
--- /dev/null
+++ b/freed-ora/current/F-12/pci-acpi-disable-aspm-if-no-osc.patch
@@ -0,0 +1,55 @@
+From: Matthew Garrett <mjg@redhat.com>
+Subject: ACPI: Disable ASPM if the platform won't provide _OSC control for PCIe
+
+ACPI: Disable ASPM if the platform won't provide _OSC control for PCIe
+
+[ backport to 2.6.32 ]
+
+The PCI SIG documentation for the _OSC OS/firmware handshaking interface
+states:
+
+"If the _OSC control method is absent from the scope of a host bridge
+device, then the operating system must not enable or attempt to use any
+features defined in this section for the hierarchy originated by the host
+bridge."
+
+The obvious interpretation of this is that the OS should not attempt to use
+PCIe hotplug, PME or AER - however, the specification also notes that an
+_OSC method is *required* for PCIe hierarchies, and experimental validation
+with An Alternative OS indicates that it doesn't use any PCIe functionality
+if the _OSC method is missing. That arguably means we shouldn't be using
+MSI or extended config space, but right now our problems seem to be limited
+to vendors being surprised when ASPM gets enabled on machines when other
+OSs refuse to do so. So, for now, let's just disable ASPM if the _OSC
+method doesn't exist or refuses to hand over PCIe capability control.
+
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+---
+
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index 4eac593..1f67057 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -33,6 +33,7 @@
+ #include <linux/pm.h>
+ #include <linux/pci.h>
+ #include <linux/pci-acpi.h>
++#include <linux/pci-aspm.h>
+ #include <linux/acpi.h>
+ #include <acpi/acpi_bus.h>
+ #include <acpi/acpi_drivers.h>
+@@ -543,6 +544,14 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
+ if (flags != base_flags)
+ acpi_pci_osc_support(root, flags);
+
++ status = acpi_pci_osc_control_set(root->device->handle,
++ 0);
++
++ if (status == AE_NOT_EXIST) {
++ printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
++ pcie_no_aspm();
++ }
++
+ return 0;
+
+ end: \ No newline at end of file
diff --git a/freed-ora/current/F-12/pci-aspm-dont-enable-too-early.patch b/freed-ora/current/F-12/pci-aspm-dont-enable-too-early.patch
new file mode 100644
index 000000000..ea91a2554
--- /dev/null
+++ b/freed-ora/current/F-12/pci-aspm-dont-enable-too-early.patch
@@ -0,0 +1,50 @@
+From: Matthew Garrett <mjg@redhat.com>
+Date: Wed, 9 Jun 2010 20:05:07 +0000 (-0400)
+Subject: PCI: Don't enable aspm before drivers have had a chance to veto it
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fjbarnes%2Fpci-2.6.git;a=commitdiff_plain;h=8f0b08c29f1df91315e48adce04462eb23671099
+
+PCI: Don't enable aspm before drivers have had a chance to veto it
+
+The aspm code will currently set the configured aspm policy before drivers
+have had an opportunity to indicate that their hardware doesn't support it.
+Unfortunately, putting some hardware in L0 or L1 can result in the hardware
+no longer responding to any requests, even after aspm is disabled. It makes
+more sense to leave aspm policy at the BIOS defaults at initial setup time,
+reconfiguring it after pci_enable_device() is called. This allows the
+driver to blacklist individual devices beforehand.
+
+Reviewed-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+---
+
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index be53d98..7122281 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -588,11 +588,23 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
+ * update through pcie_aspm_cap_init().
+ */
+ pcie_aspm_cap_init(link, blacklist);
+- pcie_config_aspm_path(link);
+
+ /* Setup initial Clock PM state */
+ pcie_clkpm_cap_init(link, blacklist);
+- pcie_set_clkpm(link, policy_to_clkpm_state(link));
++
++ /*
++ * At this stage drivers haven't had an opportunity to change the
++ * link policy setting. Enabling ASPM on broken hardware can cripple
++ * it even before the driver has had a chance to disable ASPM, so
++ * default to a safe level right now. If we're enabling ASPM beyond
++ * the BIOS's expectation, we'll do so once pci_enable_device() is
++ * called.
++ */
++ if (aspm_policy != POLICY_POWERSAVE) {
++ pcie_config_aspm_path(link);
++ pcie_set_clkpm(link, policy_to_clkpm_state(link));
++ }
++
+ unlock:
+ mutex_unlock(&aspm_lock);
+ out:
diff --git a/freed-ora/current/F-12/perf b/freed-ora/current/F-12/perf
new file mode 100644
index 000000000..ea8980694
--- /dev/null
+++ b/freed-ora/current/F-12/perf
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# In pathological situations, this will print some error about uname.
+kverrel="`uname -r`" || exit
+
+exec "/usr/libexec/perf.$kverrel" ${1+"$@"}
+rc=$?
+
+# We're still here, so the exec failed.
+echo >&2 "Sorry, your kernel ($kverrel) doesn't support perf."
+
+exit $rc
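As a rough illustration of how the wrapper above dispatches, assuming a hypothetical kernel release string (not taken from this commit):

    $ uname -r
    2.6.32.18-159.fc12.x86_64
    $ perf record -a sleep 5
    # the wrapper execs /usr/libexec/perf.2.6.32.18-159.fc12.x86_64 with the same arguments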
diff --git a/freed-ora/current/F-12/scripts/CVS/Entries b/freed-ora/current/F-12/scripts/CVS/Entries
deleted file mode 100644
index 1918f5526..000000000
--- a/freed-ora/current/F-12/scripts/CVS/Entries
+++ /dev/null
@@ -1,25 +0,0 @@
-/bumpspecfile.py/1.5/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/check-TODO.sh/1.3/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/combine.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/configcommon.pl/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/configdiff.pl/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-amd64.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-i586.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-i686.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-ia64.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-iseries.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-ppc.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-ppc64.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-ppc8260.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-ppc8560.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-pseries.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-s390.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/cross-s390x.sh/1.1/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/get-snapshot.sh/1.8/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/newpatch.sh/1.2/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/pull-upstreams.sh/1.7/Tue Jan 5 02:22:20 2010//Tkernel-2_6_32_16-154_fc12
-/rebase.sh/1.27/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/reconfig.sh/1.9/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/rediffall.pl/1.4/Tue Jan 5 02:21:40 2010//Tkernel-2_6_32_16-154_fc12
-/sort-config/1.9/Thu Jan 7 10:40:04 2010//Tkernel-2_6_32_16-154_fc12
-D
diff --git a/freed-ora/current/F-12/scripts/CVS/Repository b/freed-ora/current/F-12/scripts/CVS/Repository
deleted file mode 100644
index 7dba9b651..000000000
--- a/freed-ora/current/F-12/scripts/CVS/Repository
+++ /dev/null
@@ -1 +0,0 @@
-rpms/kernel/F-12/scripts
diff --git a/freed-ora/current/F-12/scripts/CVS/Root b/freed-ora/current/F-12/scripts/CVS/Root
deleted file mode 100644
index d426f1a67..000000000
--- a/freed-ora/current/F-12/scripts/CVS/Root
+++ /dev/null
@@ -1 +0,0 @@
-:pserver:anonymous@cvs.fedoraproject.org.:/cvs/pkgs
diff --git a/freed-ora/current/F-12/scripts/CVS/Tag b/freed-ora/current/F-12/scripts/CVS/Tag
deleted file mode 100644
index 9b507f95b..000000000
--- a/freed-ora/current/F-12/scripts/CVS/Tag
+++ /dev/null
@@ -1 +0,0 @@
-Nkernel-2_6_32_16-154_fc12
diff --git a/freed-ora/current/F-12/scripts/bumpspecfile.py b/freed-ora/current/F-12/scripts/bumpspecfile.py
new file mode 100755
index 000000000..478e82871
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/bumpspecfile.py
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+#
+# Needs $GIT_COMMITTER_NAME and $GIT_COMMITTER_EMAIL set.
+#
+import re
+import sys
+import time
+import os
+import string
+
+class Specfile:
+ def __init__(self,filename):
+ file=open(filename,"r")
+ self.lines=file.readlines()
+ self.vr=""
+
+ def getNextVR(self,aspec):
+ # Get VR for changelog entry.
+ (ver,rel) = os.popen("LC_ALL=C rpm --specfile -q --qf '%%{version} %%{release}\n' --define 'dist %%{nil}' %s | head -1" % aspec).read().strip().split(' ')
+ pos = 0
+ # general released kernel case, bump 1st field
+ fedora_build = rel.split('.')[pos]
+ if fedora_build == "0":
+ # this is a devel kernel, bump 2nd field
+ pos = 1
+ elif rel.split('.')[-1] != fedora_build:
+ # this is a branch, must bump 3rd field
+ pos = 2
+ fedora_build = rel.split('.')[pos]
+ if pos == 1 and len(rel.split('.')) > 4:
+ # uh... what? devel kernel in a branch? private build? just do no VR in clog...
+ print "Warning: not adding any VR to changelog, couldn't tell for sure which field to bump"
+ pos = -1
+ next_fedora_build = int(fedora_build) + 1
+ if pos == 0:
+ nextrel = str(next_fedora_build)
+ elif pos == 1:
+ nextrel = "0." + str(next_fedora_build)
+ elif pos == 2:
+ nextrel = rel.split('.')[0] + "." + rel.split('.')[1] + "." + str(next_fedora_build)
+ if pos >= 0:
+ for s in rel.split('.')[pos + 1:]:
+ nextrel = nextrel + "." + s
+ self.vr = " "+ver+'-'+nextrel
+
+ def addChangelogEntry(self,entry):
+ user = os.environ.get("GIT_COMMITTER_NAME","unknown")
+ email = os.environ.get("GIT_COMMITTER_EMAIL","unknown")
+ if (email == "unknown"):
+ email = os.environ.get("USER","unknown")+"@fedoraproject.org"
+ changematch=re.compile(r"^%changelog")
+ date=time.strftime("%a %b %d %Y", time.localtime(time.time()))
+ newchangelogentry="%changelog\n* "+date+" "+user+" <"+email+">"+self.vr+"\n"+entry+"\n\n"
+ for i in range(len(self.lines)):
+ if(changematch.match(self.lines[i])):
+ self.lines[i]=newchangelogentry
+ break
+
+ def writeFile(self,filename):
+ file=open(filename,"w")
+ file.writelines(self.lines)
+ file.close()
+
+if __name__=="__main__":
+ aspec=(sys.argv[1])
+ s=Specfile(aspec)
+ entry=(sys.argv[2])
+ s.getNextVR(aspec)
+ s.addChangelogEntry(entry)
+ s.writeFile(aspec)
+
diff --git a/freed-ora/current/F-12/scripts/check-TODO.sh b/freed-ora/current/F-12/scripts/check-TODO.sh
new file mode 100755
index 000000000..7067f0b44
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/check-TODO.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+for i in `grep ^* TODO | awk '{ print $2 }'`
+do
+ if [ ! -f $i ]; then
+ echo "$i referenced in the TODO, but isn't in CVS!"
+ fi;
+done
+
+# sometimes dead stuff lingers in cvs, even though it's not in the specfile.
+for i in *.patch
+do
+ for j in $(grep $i kernel.spec | grep Apply.*Patch | awk '{ print $2 }' | wc -l)
+ do
+ if [ "$j" = "0" ]; then
+ echo $i is in CVS, but not applied in spec file.
+ grep $i TODO | awk '{ print $2 " is also still in the TODO" }'
+ fi
+ done
+done
+
+#for i in `grep ApplyPatch kernel.spec | awk '{ print $2 }'`
+#do
+# R=$(grep $i TODO)
+# echo "$i is in CVS, but not mentioned in the TODO!"
+#done
+
diff --git a/freed-ora/current/F-12/scripts/combine.sh b/freed-ora/current/F-12/scripts/combine.sh
new file mode 100755
index 000000000..86a68d302
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/combine.sh
@@ -0,0 +1,34 @@
+#! /bin/sh
+
+# combine a set of quilt patches
+
+# $1 : base dir (source tree)
+# $2 : quilt dir (patches to apply)
+# $3 : pre-patch to apply first (optional)
+
+# e.g.:
+# combine.sh /home/user/fedora/trunk/kernel/F-11/kernel-2.6.30/vanilla-2.6.30 \
+# /home/user/git/stable-queue/queue-2.6.30 \
+# /home/user/fedora/trunk/kernel/F-11/patch-2.6.30.5.bz2
+
+if [ $# -lt 2 ] ; then
+ exit 1
+fi
+
+TD="combine_temp.d"
+
+cd $1 || exit 1
+cd ..
+[ -d $TD ] && rm -Rf $TD
+mkdir $TD || exit 1
+cd $TD
+
+cp -al ../$(basename $1) work.d
+cd work.d
+[ "$3" ] && bzcat $3 | patch -p1 -s
+ln -s $2 patches
+[ -h patches ] || exit 1
+quilt snapshot
+quilt upgrade
+quilt push -a -q
+quilt diff --snapshot >../combined.patch
diff --git a/freed-ora/current/F-12/scripts/configcommon.pl b/freed-ora/current/F-12/scripts/configcommon.pl
new file mode 100644
index 000000000..38bbe80dc
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/configcommon.pl
@@ -0,0 +1,82 @@
+#! /usr/bin/perl
+
+my @args=@ARGV;
+my @configoptions;
+my @configvalues;
+my @common;
+my $configcounter = 0;
+
+# first, read the 1st file
+
+open (FILE,"$args[0]") || die "Could not open $args[0]";
+while (<FILE>) {
+ my $str = $_;
+ if (/\# ([\w]+) is not set/) {
+ $configoptions[$configcounter] = $1;
+ $configvalues[$configcounter] = $str;
+ $common[$configcounter] = 1;
+ $configcounter ++;
+ } else {
+ if (/([\w]+)=/) {
+ $configoptions[$configcounter] = $1;
+ $configvalues[$configcounter] = $str;
+ $common[$configcounter] = 1;
+ $configcounter ++;
+ } else {
+ $configoptions[$configcounter] = "foobarbar";
+ $configvalues[$configcounter] = $str;
+ $common[$configcounter] = 1;
+ $configcounter ++;
+ }
+ }
+};
+
+# now, read all configfiles and see of the options match the initial one.
+# if not, mark it not common
+my $cntr=1;
+
+
+while ($cntr < @ARGV) {
+ open (FILE,$args[$cntr]) || die "Could not open $args[$cntr]";
+ while (<FILE>) {
+ my $nooutput;
+ my $counter;
+ my $configname;
+
+ if (/\# ([\w]+) is not set/) {
+ $configname = $1;
+ } else {
+ if (/([\w]+)=/) {
+ $configname = $1;
+ }
+ }
+
+ $counter = 0;
+ $nooutput = 0;
+ while ($counter < $configcounter) {
+ if ("$configname" eq "$configoptions[$counter]") {
+ if ("$_" eq "$configvalues[$counter]") {
+ 1;
+ } else {
+ $common[$counter] = 0;
+ }
+ }
+ $counter++;
+ }
+ }
+
+ $cntr++;
+}
+
+# now print the common values
+my $counter = 0;
+
+while ($counter < $configcounter) {
+ if ($common[$counter]!=0) {
+ print "$configvalues[$counter]";
+ }
+ $counter++;
+}
+
+1;
+
diff --git a/freed-ora/current/F-12/scripts/configdiff.pl b/freed-ora/current/F-12/scripts/configdiff.pl
new file mode 100644
index 000000000..848d8df0f
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/configdiff.pl
@@ -0,0 +1,76 @@
+#! /usr/bin/perl
+
+my @args=@ARGV;
+my @configoptions;
+my @configvalues;
+my @alreadyprinted;
+my $configcounter = 0;
+
+# first, read the override file
+
+open (FILE,"$args[0]") || die "Could not open $args[0]";
+while (<FILE>) {
+ my $str = $_;
+ if (/\# ([\w]+) is not set/) {
+ $configoptions[$configcounter] = $1;
+ $configvalues[$configcounter] = $str;
+ $alreadprinted[$configcounter] = 0;
+ $configcounter ++;
+ } else {
+ if (/([\w]+)=/) {
+ $configoptions[$configcounter] = $1;
+ $configvalues[$configcounter] = $str;
+ $alreadprinted[$configcounter] = 0;
+ $configcounter ++;
+ } else {
+ $configoptions[$configcounter] = "$_";
+ $configvalues[$configcounter] = $str;
+ $alreadprinted[$configcounter] = 0;
+ $configcounter ++;
+ }
+ }
+};
+
+# now, read and output the entire configfile, except for the overridden
+# parts... for those the new value is printed.
+# O(N^2) algorithm so if this is slow I need to look at it later
+
+open (FILE2,"$args[1]") || die "Could not open $args[1]";
+while (<FILE2>) {
+ my $nooutput;
+ my $counter;
+ my $configname="$_";
+ my $match;
+
+ if (/\# ([\w]+) is not set/) {
+ $configname = $1;
+ } else {
+ if (/([\w]+)=/) {
+ $configname = $1;
+ }
+ }
+
+ $counter = 0;
+ $nooutput = 0;
+ $match = 0;
+# print "C : $configname";
+ while ($counter < $configcounter) {
+ if ("$configname" eq "$configoptions[$counter]") {
+ if ( ("$_" eq "$configvalues[$counter]") || ("$configname" eq "") ) {
+ $match = 1;
+ } else {
+ $alreadyprinted[$configcounter] = 1;
+ print "$_";
+ $match = 1;
+ }
+ }
+ $counter++;
+ }
+ if ($match == 0) {
+ print "$_";
+ }
+
+}
+
+
+1;
diff --git a/freed-ora/current/F-12/scripts/cross-amd64.sh b/freed-ora/current/F-12/scripts/cross-amd64.sh
new file mode 100644
index 000000000..b3119d8fc
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-amd64.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=x86_64-linux- ARCH=x86_64 hammer
+
diff --git a/freed-ora/current/F-12/scripts/cross-i586.sh b/freed-ora/current/F-12/scripts/cross-i586.sh
new file mode 100644
index 000000000..000f4ae7e
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-i586.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make ARCH=i386 i586
+
diff --git a/freed-ora/current/F-12/scripts/cross-i686.sh b/freed-ora/current/F-12/scripts/cross-i686.sh
new file mode 100644
index 000000000..5c0cfe137
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-i686.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make ARCH=i386 i686
+
diff --git a/freed-ora/current/F-12/scripts/cross-ia64.sh b/freed-ora/current/F-12/scripts/cross-ia64.sh
new file mode 100644
index 000000000..5699a57c1
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-ia64.sh
@@ -0,0 +1,2 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=ia64-linux- ARCH=ia64 ia64
diff --git a/freed-ora/current/F-12/scripts/cross-iseries.sh b/freed-ora/current/F-12/scripts/cross-iseries.sh
new file mode 100644
index 000000000..71bfd9156
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-iseries.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=ppc64-linux- ARCH=ppc64 ppc64iseries
+
diff --git a/freed-ora/current/F-12/scripts/cross-ppc.sh b/freed-ora/current/F-12/scripts/cross-ppc.sh
new file mode 100644
index 000000000..c49b740f3
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-ppc.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=ppc-linux- ARCH=ppc ppc
+
diff --git a/freed-ora/current/F-12/scripts/cross-ppc64.sh b/freed-ora/current/F-12/scripts/cross-ppc64.sh
new file mode 100644
index 000000000..fb29d4481
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-ppc64.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=ppc64-linux- ARCH=ppc64 ppc64
+
diff --git a/freed-ora/current/F-12/scripts/cross-ppc8260.sh b/freed-ora/current/F-12/scripts/cross-ppc8260.sh
new file mode 100644
index 000000000..10fbc3292
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-ppc8260.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=ppc-linux- ARCH=ppc ppc8260
+
diff --git a/freed-ora/current/F-12/scripts/cross-ppc8560.sh b/freed-ora/current/F-12/scripts/cross-ppc8560.sh
new file mode 100644
index 000000000..405f98a9c
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-ppc8560.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=ppc-linux- ARCH=ppc ppc8560
+
diff --git a/freed-ora/current/F-12/scripts/cross-pseries.sh b/freed-ora/current/F-12/scripts/cross-pseries.sh
new file mode 100644
index 000000000..724a8e582
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-pseries.sh
@@ -0,0 +1,3 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=ppc64-linux- ARCH=ppc64 pseries64
+
diff --git a/freed-ora/current/F-12/scripts/cross-s390.sh b/freed-ora/current/F-12/scripts/cross-s390.sh
new file mode 100644
index 000000000..4a274397c
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-s390.sh
@@ -0,0 +1,2 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=s390-linux- ARCH=s390 s390
diff --git a/freed-ora/current/F-12/scripts/cross-s390x.sh b/freed-ora/current/F-12/scripts/cross-s390x.sh
new file mode 100644
index 000000000..1cdc98a73
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/cross-s390x.sh
@@ -0,0 +1,2 @@
+export PATH=$PATH:/opt/cross/bin
+make CROSS_COMPILE=s390x-linux- ARCH=s390 s390x
diff --git a/freed-ora/current/F-12/scripts/get-snapshot.sh b/freed-ora/current/F-12/scripts/get-snapshot.sh
new file mode 100755
index 000000000..79d2b095e
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/get-snapshot.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+VER=$(tail -n1 upstream | sed s/bz2/id/)
+rm -f $VER
+wget -c http://www.kernel.org/pub/linux/kernel/v2.6/snapshots/$VER
+SHA1=$(cat $VER)
+rm -f patch-2.6.*-git*.id
+
+cd ~/src/git-trees/kernel/linux-2.6
+git pull
+
+DIF=$(git diff $SHA1.. | wc -l)
+if [ "$DIF" = "0" ]; then
+ echo Nothing changed.
+ exit
+fi
+TOT=$(git log | head -n1)
+
+git diff $SHA1.. > ~/src/fedora/kernel/devel/git-linus-new.diff
+cd ~/src/fedora/kernel/devel/
+DIF=$(cmp git-linus.diff git-linus-new.diff)
+if [ "$?" = "0" ]; then
+ echo Nothing new in git
+ rm -f git-linus-new.diff
+ exit
+fi
+mv git-linus-new.diff git-linus.diff
+
+perl -p -i -e 's|^#ApplyPatch\ git-linus.diff|ApplyPatch\ git-linus.diff|' kernel.spec
+
+echo "- Merge Linux-2.6 up to" $TOT > ~/src/fedora/kernel/devel/clog.tmp
+cd ~/src/fedora/kernel/devel/
+bumpspecfile.py kernel.spec "$(cat clog.tmp)"
+rm -f clog.tmp
+make clog
diff --git a/freed-ora/current/F-12/scripts/grab-logs.sh b/freed-ora/current/F-12/scripts/grab-logs.sh
new file mode 100755
index 000000000..8a445ec99
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/grab-logs.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+VER=$(make verrel)
+ver=$(echo $VER | sed -e 's/-/ /g' | awk '{print $2}')
+rev=$(echo $VER | sed -e 's/-/ /g' | awk '{print $3}')
+
+if [ -d logs ]; then
+ DIR=logs/
+else
+ DIR=./
+fi
+
+wget -O $DIR/build-$VER-i686.log http://kojipkgs.fedoraproject.org/packages/kernel/$ver/$rev/data/logs/i686/build.log
+wget -O $DIR/build-$VER-x86-64.log http://kojipkgs.fedoraproject.org/packages/kernel/$ver/$rev/data/logs/x86_64/build.log
+wget -O $DIR/build-$VER-noarch.log http://kojipkgs.fedoraproject.org/packages/kernel/$ver/$rev/data/logs/noarch/build.log
+
diff --git a/freed-ora/current/F-12/scripts/newpatch.sh b/freed-ora/current/F-12/scripts/newpatch.sh
new file mode 100755
index 000000000..0dc2e837c
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/newpatch.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# Easy application of new patches.
+# Always adds to the very end. (Bumps last patch nr by 100)
+# Parameters:
+# $1 - patch filename
+# $2 - description
+
+OLD=$(grep ^Patch kernel.spec | tail -n1 | awk '{ print $1 }' | sed s/Patch// | sed s/://)
+NEW=$(($OLD/100*100+100))
+
+sed -i "/^Patch$OLD:\ /a#\ $2\nPatch$NEW:\ $1" kernel.spec
+
+LAST=$(grep ^ApplyPatch kernel.spec | tail -n1 | awk '{ print $2 }')
+
+sed -i "/^ApplyPatch $LAST/aApplyPatch $1" kernel.spec
+
+cvs add $1
+
+scripts/bumpspecfile.py kernel.spec "- $2"
+make clog
+
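A minimal usage sketch for newpatch.sh above; the patch file name and one-line summary are made-up examples, not taken from this commit:

    # Hypothetical invocation from the package directory: appends the
    # Patch/ApplyPatch entries, cvs-adds the file and bumps the spec.
    scripts/newpatch.sh linux-2.6-fix-foo-null-deref.patch "Fix NULL dereference in the foo driver"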
diff --git a/freed-ora/current/F-12/scripts/pull-upstreams.sh b/freed-ora/current/F-12/scripts/pull-upstreams.sh
new file mode 100755
index 000000000..e94fcfd48
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/pull-upstreams.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+utrace_base=2.6-current
+utrace_base=2.6.34
+
+url=http://people.redhat.com/roland/utrace/${1:-$utrace_base}
+
+wget -q -O /dev/stdout $url/series | grep 'patch$' |
+while read i
+do
+ rm -f linux-2.6-$i
+ wget -nv -O linux-2.6-$i $url/$i
+done
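A usage sketch for pull-upstreams.sh above; the optional argument overrides the default series selected by $utrace_base:

    # Fetch the utrace patch series from people.redhat.com/roland for an
    # explicit base release (with no argument the script uses $utrace_base).
    scripts/pull-upstreams.sh 2.6.34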
diff --git a/freed-ora/current/F-12/scripts/rebase.sh b/freed-ora/current/F-12/scripts/rebase.sh
new file mode 100755
index 000000000..fc3157b24
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/rebase.sh
@@ -0,0 +1,199 @@
+#!/bin/bash
+
+if [ ! -f /usr/bin/curl ]; then
+ echo yum install curl
+ exit 0
+fi
+
+# Current kernel bits
+if [ `grep -c ^patch upstream` -ge 1 ]; then
+ export OLD=`grep ^patch upstream | tail -n1 | sed s/patch-// | sed s/\.bz2//`
+else
+ export OLD=`grep linux-2.6 upstream | tail -n1 | sed s/linux-// | sed s/\.tar\.bz2//`
+fi
+export OLDBASE=`echo $OLD | sed s/-/\ /g | sed s/2\.6\.// | awk '{ print $1 }'`
+if [ `echo $OLD | grep -c rc` -ge 1 ]; then
+ export OLDRC=`echo $OLD | sed s/-/\ /g | sed s/rc// | awk '{ print $2 }'`
+ if [ `echo $OLD | grep -c git` -ge 1 ]; then
+ export OLDGIT=`echo $OLD | sed s/-/\ /g | sed s/git// | awk '{ print $3 }'`
+ else
+ export OLDGIT=0
+ fi
+else
+ export OLDRC=0
+ if [ `echo $OLD | grep -c git` -ge 1 ]; then
+ export OLDGIT=`echo $OLD | sed s/-/\ /g | sed s/git// | awk '{ print $2 }'`
+ else
+ export OLDGIT=0
+ fi
+fi
+
+# Is there a new snapshot or prepatch ?
+NEW=`curl -s http://www.kernel.org/kdist/finger_banner | grep "latest snapshot 2.6 version"`
+if [ -z "$NEW" ] ; then
+ NEW=`curl -s http://www.kernel.org/kdist/finger_banner | grep "latest mainline 2.6 version"`
+ if [ -z "$NEW" ] ; then
+ if [ "$OLDRC" -ne 0 ] ; then
+ NEW=`curl -s http://www.kernel.org/kdist/finger_banner | grep "latest stable 2.6." | head -n1`
+ else
+ echo "No new rc or git snapshot of stable branch".
+ exit 0
+ fi
+ fi
+fi
+export N=`echo $NEW | awk '{ print $11 }'`
+if [ -z "$N" ]; then
+ # "Stable version"
+ export NEW=`echo $NEW | awk '{ print $10 }'`
+else
+ export NEW=`echo $NEW | awk '{ print $11 }'`
+fi
+
+export NEWBASE=`echo $NEW | sed s/-/\ /g | sed s/2\.6\.// | awk '{ print $1 }'`
+if [ `echo $NEW | grep -c rc` -ge 1 ]; then
+ export NEWRC=`echo $NEW | sed s/-/\ /g | sed s/rc// | awk '{ print $2 }'`
+ if [ `echo $NEW | grep -c git` -ge 1 ]; then
+ export NEWGIT=`echo $NEW | sed s/-/\ /g | sed s/git// | awk '{ print $3 }'`
+ else
+ export NEWGIT=0
+ fi
+else
+ export NEWRC=0
+ if [ `echo $NEW | grep -c git` -ge 1 ]; then
+ export NEWGIT=`echo $NEW | sed s/-/\ /g | sed s/git// | awk '{ print $2 }'`
+ else
+ export NEWGIT=0
+ fi
+fi
+
+echo "OLD kernel was $OLD BASE=$OLDBASE RC=$OLDRC GIT=$OLDGIT"
+echo "NEW kernel is $NEW BASE=$NEWBASE RC=$NEWRC GIT=$NEWGIT"
+
+if [ "$OLDRC" -eq 0 -a "$OLDGIT" -eq 0 -a "$OLDGIT" -ne "$NEWGIT" ]; then
+ echo "Rebasing from a stable release to a new git snapshot"
+ perl -p -i -e 's/^%define\ released_kernel\ 1/\%define\ released_kernel\ 0/' kernel.spec
+ perl -p -i -e 's/^%define\ rawhide_skip_docs\ 1/\%define\ rawhide_skip_docs\ 0/' kernel.spec
+ # force these to zero in this case, they may not have been when we rebased to stable
+ perl -p -i -e 's/^%define\ rcrev.*/\%define\ rcrev\ 0/' kernel.spec
+ perl -p -i -e 's/^%define\ gitrev.*/\%define\ gitrev\ 0/' kernel.spec
+fi
+
+# make sure we build docs at least once per -rc kernel, shut it off otherwise
+if [ "$OLDRC" -ne 0 -a "$NEWRC" -gt "$OLDRC" ]; then
+ perl -p -i -e 's/^%define\ rawhide_skip_docs\ 1/\%define\ rawhide_skip_docs\ 0/' kernel.spec
+else
+ if [ "$NEWRC" -eq "$OLDRC" -a "$NEWGIT" -gt "$OLDGIT" ]; then
+ # common case, same -rc, new -git, make sure docs are off.
+ perl -p -i -e 's/^%define\ rawhide_skip_docs\ 0/\%define\ rawhide_skip_docs\ 1/' kernel.spec
+ fi
+fi
+
+if [ "$NEWRC" -eq 0 -a "$NEWGIT" -eq 0 ]; then
+ echo "Rebasing from -rc to final release."
+ perl -p -i -e 's/^%define\ released_kernel\ 0/\%define\ released_kernel\ 1/' kernel.spec
+ perl -p -i -e 's/^%define\ rawhide_skip_docs\ 1/\%define\ rawhide_skip_docs\ 0/' kernel.spec
+ export OLD_TARBALL_BASE=$(($OLDBASE-1))
+ perl -p -i -e 's/^%define\ base_sublevel\ $ENV{OLD_TARBALL_BASE}/%define\ base_sublevel\ $ENV{NEWBASE}/' kernel.spec
+ perl -p -i -e 's/^%define\ rcrev.*/\%define\ rcrev\ 0/' kernel.spec
+ perl -p -i -e 's/^%define\ gitrev.*/\%define\ gitrev\ 0/' kernel.spec
+
+ grep -v kernel-2.6.$OLD_TARBALL_BASE .cvsignore >.cvsignore.tmp ; mv .cvsignore.tmp .cvsignore
+ echo kernel-2.6.$NEWBASE >> .cvsignore
+
+ for i in upstream sources .cvsignore
+ do
+ grep -v linux-2.6.$OLD_TARBALL_BASE.tar.bz2 $i > .$i.tmp; mv .$i.tmp $i
+ grep -v patch-2.6.$OLDBASE-rc$OLDRC.bz2 $i > .$i.tmp; mv .$i.tmp $i
+ grep -v patch-2.6.$OLDBASE-rc$OLDRC-git$OLDGIT.bz2 $i > .$i.tmp; mv .$i.tmp $i
+ done
+
+ echo linux-2.6.$NEWBASE.tar.bz2 >> upstream
+
+ rm -f linux-2.6.$OLD_TARBALL_BASE.tar.bz2
+ rm -f linux-2.6.$OLD_TARBALL_BASE.tar.bz2.sign
+ rm -f patch-2.6.$OLDBASE-rc$OLDRC.bz2
+ rm -f patch-2.6.$OLDBASE-rc$OLDRC.bz2.sign
+ rm -f patch-2.6.$OLDBASE-rc$OLDRC-git$OLDGIT.bz2
+ rm -f patch-2.6.$OLDBASE-rc$OLDRC-git$OLDGIT.bz2.sign
+
+ cvs remove linux-2.6.$OLD_TARBALL_BASE.tar.bz2.sign
+ cvs remove patch-2.6.$OLDBASE-rc$OLDRC.bz2.sign
+ cvs remove patch-2.6.$OLDBASE-rc$OLDRC-git$OLDGIT.bz2.sign
+
+ make download
+ make upload FILES=linux-$NEW.tar.bz2
+
+ cvs add linux-$NEW.tar.bz2.sign
+
+ bumpspecfile.py kernel.spec "- $NEW"
+ make clog
+ echo FIXME! Fix up fedora_cvs_origin
+ make verrel
+ exit 1
+fi
+
+if [ "$OLDRC" != "$NEWRC" ]; then
+ echo "Different rc. Rebasing from $OLDRC to $NEWRC"
+ perl -p -i -e 's/^%define\ rcrev.*/\%define\ rcrev\ $ENV{"NEWRC"}/' kernel.spec
+ perl -p -i -e 's/$ENV{OLDBASE}-rc$ENV{OLDRC}.bz2/$ENV{NEWBASE}-rc$ENV{NEWRC}.bz2/' .cvsignore
+ perl -p -i -e 's/$ENV{OLDBASE}-rc$ENV{OLDRC}.bz2/$ENV{NEWBASE}-rc$ENV{NEWRC}.bz2/' upstream
+ grep -v patch-2.6.$OLDBASE-rc$OLDRC.bz2 sources > .sources.tmp; mv .sources.tmp sources
+ grep -v patch-2.6.$OLDBASE-rc$OLDRC-git$OLDGIT.bz2 .cvsignore >.cvsignore.tmp ; mv .cvsignore.tmp .cvsignore
+ if [ `grep -c patch-2.6.$NEWBASE-rc$NEWRC.bz2 upstream` -eq 0 ]; then
+ echo patch-2.6.$NEWBASE-rc$NEWRC.bz2 >> .cvsignore
+ echo patch-2.6.$NEWBASE-rc$NEWRC.bz2 >> upstream
+ fi
+ rm -f patch-2.6.$OLDBASE-rc$OLDRC.bz2
+ rm -f patch-2.6.$OLDBASE-rc$OLDRC.bz2.sign
+ cvs remove patch-2.6.$OLDBASE-rc$OLDRC.bz2.sign
+ make download
+ make upload FILES=patch-2.6.$NEWBASE-rc$NEWRC.bz2
+ cvs add patch-2.6.$NEWBASE-rc$NEWRC.bz2.sign
+
+ # Another awkward (albeit unlikely) corner case.
+ # Moving from say 26-rc3-git1 to 26-rc4-git1
+ # The above will grab the new -rc, but the below will
+ # think that the -git hasn't changed.
+ # Fudge around this, by pretending the old git was something crazy.
+ OLDGIT=99
+fi
+
+if [ "$OLDGIT" != "$NEWGIT" ]; then
+ if [ "$OLDRC" -eq 0 -a "$OLDGIT" -eq 0 ]; then
+ echo "Rebasing to pre-rc git$NEWGIT"
+ else
+ echo "Different git. Rebasing from git$OLDGIT to git$NEWGIT"
+ fi
+ perl -p -i -e 's/^%define\ gitrev.*/\%define\ gitrev\ $ENV{"NEWGIT"}/' kernel.spec
+ if [ "$OLDGIT" -ne 0 ]; then
+ if [ "$NEWGIT" -ne 0 ]; then
+ perl -p -i -e 's/$ENV{OLD}/$ENV{NEW}/' .cvsignore
+ perl -p -i -e 's/$ENV{OLD}/$ENV{NEW}/' upstream
+ fi
+ grep -v patch-$OLD.bz2 sources > .sources.tmp; mv .sources.tmp sources
+ grep -v patch-$OLD.bz2 upstream > .upstream.tmp; mv .upstream.tmp upstream
+ else
+ echo patch-$NEW.bz2 >> .cvsignore
+ echo patch-$NEW.bz2 >> upstream
+ fi
+
+ make download
+ make upload FILES=patch-$NEW.bz2
+
+ cvs add patch-$NEW.bz2.sign
+ if [ "$OLDGIT" -ne 0 ]; then
+ rm -f patch-$OLD.bz2
+ rm -f patch-$OLD.bz2.sign
+ cvs remove patch-$OLD.bz2.sign
+ fi
+fi
+
+if [ "$OLDRC" != "$NEWRC" -o "$OLDGIT" != "$NEWGIT" ]; then
+ perl -p -i -e 's|^ApplyPatch\ git-linus.diff|#ApplyPatch\ git-linus.diff|' kernel.spec
+ > git-linus.diff
+ bumpspecfile.py kernel.spec "- $NEW"
+ make clog
+ exit 1
+else
+ exit 0
+fi
diff --git a/freed-ora/current/F-12/scripts/reconfig.sh b/freed-ora/current/F-12/scripts/reconfig.sh
new file mode 100755
index 000000000..d9e8fa7c3
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/reconfig.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+base_sublevel=$(grep "^%define base_sublevel" kernel.spec | head -n1 | awk '{ print $3 }')
+
+#if [ `grep -c "^%define released_kernel 1" kernel.spec` -ge 1 ]; then
+ V=$base_sublevel
+#else
+# let V=$base_sublevel+1
+#fi
+
+cd kernel-2.6.$base_sublevel/linux-2.6.$base_sublevel.noarch/
+rm -f kernel-*.config
+cp ../../kernel-2.6.$V-*.config .
+
+for i in kernel-*.config
+do
+ echo $i
+ rm -f .config
+ cp $i .config
+ Arch=`head -1 .config | cut -b 3-`
+ make ARCH=$Arch nonint_oldconfig > /dev/null || exit 1
+ echo "# $Arch" > configs/$i
+ cat .config >> configs/$i
+ echo
+done
+
diff --git a/freed-ora/current/F-12/scripts/rediffall.pl b/freed-ora/current/F-12/scripts/rediffall.pl
new file mode 100644
index 000000000..29f12beb9
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/rediffall.pl
@@ -0,0 +1,64 @@
+#!/usr/bin/perl -w
+#
+# Script to rediff all patches in the spec
+# Usage: perl -w rediffall.pl < kernel-2.4.spec
+#
+# $workdir is where the new rediff'ed patches are created
+# $origdir is where the original patches and tarball are located
+#
+# Note that both $workdir and $origdir must be absolute path names.
+# Suggestion: create a /kernel symbolic link to the top of your CVS tree.
+
+my $workdir = "/dev/shm/redifftree";
+my $origdir = "/home/davej/devel";
+my $kernver = "linux-2.6.17";
+my $datestrip = "s/^\\(\\(+++\\|---\\) [^[:blank:]]\\+\\)[[:blank:]].*/\\1/";
+my $patchindex = 0;
+my @patchlist;
+
+# phase 1: create a tree
+print "Extracting pristine source..\n";
+system("mkdir -p $workdir");
+system("rm -rf $workdir/*");
+chdir("$workdir");
+system("tar -jxvf $origdir/$kernver.tar.bz2 > /dev/null");
+system("cp -al $kernver linux-$patchindex");
+
+# phase 2: read the spec from stdin and store all patches
+print "Reading specfile..\n";
+
+while (<>) {
+ my $line = $_;
+ if ($line =~ /^Patch([0-9]+)\: ([a-zA-Z0-9\-\_\.\+]+\.patch)/) {
+ $patchlist[$1] = $2;
+ } else {
+ if ($line =~ /^Patch([0-9]+)\: ([a-zA-Z0-9\-\_\.]+\.bz2)/) {
+ $patchlist[$1] = $2;
+ }
+ }
+
+ if ($line =~ /^%patch([0-9]+) -p1/) {
+ # copy the tree, apply the patch, diff and remove the old tree
+ my $oldindex = $patchindex;
+ $patchindex = $1;
+
+ print "rediffing patch number $patchindex: $patchlist[$patchindex]\n";
+
+ system("cp -al linux-$oldindex linux-$patchindex");
+ chdir("linux-$patchindex");
+ if ($patchlist[$patchindex] =~ /bz2/) {
+ system("bzcat $origdir/$patchlist[$patchindex] | patch -p1 &>/dev/null");
+ } else {
+ system("cat $origdir/$patchlist[$patchindex] | patch -p1 &>/dev/null");
+ }
+ chdir("$workdir");
+ system("rm -f `find -name \"*orig\"`");
+ if ($patchlist[$patchindex] =~ /bz2/) {
+ } else {
+ system("diff -urNp --exclude-from=/home/davej/.exclude linux-$oldindex linux-$patchindex | sed '$datestrip' > $patchlist[$patchindex]");
+ }
+ system("rm -rf linux-$oldindex");
+ }
+};
+
+1;
diff --git a/freed-ora/current/F-12/scripts/sort-config b/freed-ora/current/F-12/scripts/sort-config
new file mode 100755
index 000000000..bc497ea0b
--- /dev/null
+++ b/freed-ora/current/F-12/scripts/sort-config
@@ -0,0 +1,222 @@
+#!/bin/bash
+
+SRC=($(ls config-* 2>/dev/null))
+TGT=($(ls kernel-*/linux-*.noarch/configs/kernel-2.6.*-*.config \
+ kernel-*/linux-*.noarch/configs/kernel-2.6.*-*-debug.config 2>/dev/null))
+TGT1=(${TGT[*]#kernel-*/linux-*.noarch/configs/kernel-2.6.*-})
+
+ALL_OPTS="cdfimn"
+if [ $# -lt 2 ] ; then
+ echo -e "Usage:\n $(basename $0) [-$ALL_OPTS] input target\n"
+ echo -e " Sort input config file into the same order as the target\n"
+ echo -e " -c: insert comments about non-matching/impossible items"
+ echo -e " -d: show raw unsorted output with extra debug text"
+ echo -e " -f: force output to match what is in the target config,"
+ echo -e " and/or remove impossible config items"
+ echo -e " -i: find impossible config items"
+ echo -e " -m: find changed config items"
+ echo -e " -n: do not sort output\n"
+ echo -e " input: source config file" ' [' "${SRC[*]#config-}" ']\n'
+ echo -e " target: output arch name" ' [' "${TGT1[*]%.config}" ']\n'
+ exit 1
+fi
+
+while getopts "$ALL_OPTS" OPTION ; do
+case $OPTION in
+c)
+ ADDCOMMENT=1 ;;
+d)
+ DEBUG=1 ;;
+f)
+ FORCE=1 ;;
+i)
+ FIND_IMPOSS=1 ;;
+m)
+ FIND_CHANGED=1 ;;
+n)
+ NOSORT=1 ;;
+\?)
+ exit 2 ;;
+esac
+done
+
+if [ "$FORCE" -a "$ADDCOMMENT" ] ; then
+ echo "-f and -c options cannot be used together"
+ exit 2
+fi
+
+shift $((OPTIND-1))
+
+TEMPFILES="xx00 xx01 xx98 xx99"
+TEMPLEFT=
+for FILE in $TEMPFILES ; do
+ [ -f "$FILE" ] && TEMPLEFT="Y"
+done
+if [ "$TEMPLEFT" ] ; then
+ echo "WARNING! Output files named xx?? already exist." >&2
+ read -p "Press <Enter> to erase files, or Ctrl-C to exit..."
+ echo >&2
+fi
+rm -f $TEMPFILES
+
+SRCFILE=config-$1
+[ ! -f $SRCFILE ] && echo "Input file" $SRCFILE "missing" && exit 2
+
+TGTFILE=kernel-*/linux-*.noarch/configs/kernel-2.6.*-$2.config
+[ ! -f $TGTFILE ] && echo "No target file matching" $TGTFILE "exists" && exit 2
+
+[ "$FIND_IMPOSS" ] && \
+ find kernel-*/*.noarch -name Kconfig\* -type f \
+ | xargs egrep -s -h '^[[:space:]]*(menu)?config[[:space:]]+' \
+ | sed -r 's/^[[:space:]]*(menu)?config[[:space:]]+/CONFIG_/' \
+ | sort | uniq >xx98
+
+extract_optname() {
+ # extract the option name from $TEXT, setting $OPTNAME
+ OPTNAME=
+ if [ "${TEXT:0:7}" = "CONFIG_" ] ; then
+ OPTNAME=${TEXT%%=*}
+ elif [ "${TEXT:0:9}" = "# CONFIG_" ] ; then
+ OPTNAME=${TEXT%" is not set"}
+ OPTNAME=${OPTNAME#\# }
+ fi
+}
+
+print_saved_comments() {
+ if [ $IX -gt 0 ] ; then
+ [ "$DEBUG" ] && echo " ->" $IX "comments were saved"
+ (( IX-- ))
+ for IX in $(seq 0 $IX) ; do
+ echo "$LINE":"${SAVECOMMENT[$IX]}"
+ done
+ unset SAVECOMMENT
+ IX=0
+ fi
+}
+
+assign_line_number() {
+ # use input line numbers if not sorting
+ [ "$NOSORT" ] && LINE=$IN
+ # make sure it has a line number
+ [ -z "$LINE" ] && LINE=999999
+}
+
+IX=0
+IN=0
+declare -a SAVECOMMENT
+
+cat ${SRCFILE} | {
+while read TEXT ; do
+
+ LINE=
+ COMMENT=
+
+ # replace empty lines
+ [ -z "$TEXT" ] && TEXT='//'
+
+ if [ "${TEXT:0:7}" = "CONFIG_" -o "${TEXT:0:9}" = "# CONFIG_" ] ; then
+
+ LINE=$(grep -n "^$TEXT" $TGTFILE | head -1 | cut -f 1 -d ':')
+ if [ -z "$LINE" ] ; then
+ [ "$DEBUG" ] && echo "nofind ->" "$TEXT"
+
+ extract_optname
+ if [ "$OPTNAME" ] ; then
+
+ if [ "$FIND_CHANGED" ] ; then
+ for FINDTEXT in "^${OPTNAME}=" "^# ${OPTNAME} is not set" ; do
+ if [ -z "$LINE" ] ; then
+ [ "$DEBUG" ] && echo "looking for ->" "$FINDTEXT"
+ LINE=$(grep -n "$FINDTEXT" $TGTFILE | head -1 | cut -f 1 -d ':')
+ if [ "$LINE" ] ; then
+ CHANGED=$(grep "$FINDTEXT" $TGTFILE | head -1)
+ if [ "$FORCE" ] ; then
+ TEXT=$CHANGED
+ [ "$DEBUG" ] && echo 'forced ->' "$TEXT"
+ else
+ if [ "$ADDCOMMENT" ] ; then
+ if [ ${CHANGED:0:1} = '#' ] ; then
+ NEWOPT="not set"
+ else
+ NEWOPT=${CHANGED#$OPTNAME}
+ fi
+ COMMENT="# -- Next option changed to \"${NEWOPT}\" at target line $LINE --"
+ fi
+ fi
+ fi
+ fi
+ done
+ fi
+
+ if [ "$FIND_IMPOSS" -a -z "$LINE" -a -z "$COMMENT" ] ; then
+ POSSIBLE=$(grep -n "^$OPTNAME" xx98)
+ if [ -z "$POSSIBLE" ] ; then
+ if [ "$ADDCOMMENT" ] ; then
+ COMMENT="# -- Next option is impossible --"
+ elif [ "$FORCE" ] ; then
+ [ "$DEBUG" ] && echo 'impossible ->' "$TEXT"
+ TEXT=""
+ fi
+ fi
+ fi
+
+ fi
+
+ fi
+
+ else
+ # not a config variable
+ COMMENT="$TEXT"
+ TEXT=
+ fi
+
+ [ "$DEBUG" -a "$COMMENT" ] && echo "comment ->" "$LINE" "$COMMENT"
+ [ "$DEBUG" -a "$TEXT" ] && echo "text ->" "$LINE" "$TEXT"
+
+ if [ "$TEXT" ] ; then
+
+ assign_line_number
+
+ # print the saved comments first
+ print_saved_comments
+ # now print the latest comment and text
+ [ "$COMMENT" ] && echo "$LINE":"$COMMENT"
+ echo "$LINE":"$TEXT"
+
+ elif [ "$COMMENT" ] ; then
+
+ # no output yet, save the comment
+ SAVECOMMENT[$IX]="$COMMENT"
+ let IX++
+ [ "$DEBUG" ] && echo 'savecomment (#'${IX}')'
+
+ fi
+
+ let IN++
+
+done
+# flush the buffers
+assign_line_number
+print_saved_comments
+[ "$DEBUG" ] && echo "$IN lines read from input"
+} >xx99
+
+if [ "$DEBUG" ] ; then
+ # just show the raw output with debug info, then exit
+ cat xx99
+else
+
+ # split output into two files, for matched and unmatched items
+ cat xx99 | sort -s -t ":" -k 1g | csplit -k -s - /^999999/ 2>/dev/null
+
+ cat xx00 | cut -f 2- -d ':' | sed 's/^\/\/$//'
+ if [ -s xx01 ] ; then
+ echo
+ echo '# ------------ UNMATCHED OPTIONS ------------'
+ echo
+ cat xx01 | cut -f 2- -d ':' | sed 's/^\/\/$//'
+ fi
+
+fi
+
+rm -f $TEMPFILES
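A rough example of driving sort-config above; the "generic" input, the "x86_64" target and the redirect file name are assumptions for illustration and must match existing config-* and kernel-*/linux-*.noarch/configs/ files:

    # Hypothetical run from the package directory: sort config-generic into
    # the order of the prepared x86_64 target config, annotating changed (-m)
    # and impossible (-i) options with comments (-c).
    scripts/sort-config -cmi generic x86_64 > config-generic.sorted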
diff --git a/freed-ora/current/F-12/sky2-optima-add-missing-write-bits.patch b/freed-ora/current/F-12/sky2-optima-add-missing-write-bits.patch
new file mode 100644
index 000000000..98b924dad
--- /dev/null
+++ b/freed-ora/current/F-12/sky2-optima-add-missing-write-bits.patch
@@ -0,0 +1,46 @@
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 3 Dec 2009 05:12:02 +0000 (+0000)
+Subject: net: Add missing TST_CFG_WRITE bits around sky2_pci_write
+X-Git-Tag: v2.6.33-rc1~59^2~38
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=d66f0b20b2f8eac365fadf5ca492efe4ba539446
+
+net: Add missing TST_CFG_WRITE bits around sky2_pci_write
+
+Add missing TST_CFG_WRITE bits around sky2_pci_write*() in Optima
+setup routines. Without the cfg-write bits, the driver may spew endless
+link-up messages through qlink irq.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 050e6b5..013c9f5 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -2152,7 +2152,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
+
+ /* reset PHY Link Detect */
+ phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
++ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
++ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ sky2_link_up(sky2);
+ }
+@@ -3082,6 +3084,7 @@ static void sky2_reset(struct sky2_hw *hw)
+ reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
+
+ /* reset PHY Link Detect */
++ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ sky2_pci_write16(hw, PSM_CONFIG_REG4,
+ reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
+ sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
+@@ -3099,6 +3102,7 @@ static void sky2_reset(struct sky2_hw *hw)
+ /* restore the PCIe Link Control register */
+ sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
+ }
++ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
diff --git a/freed-ora/current/F-12/sky2-optima-add-register-definitions.patch b/freed-ora/current/F-12/sky2-optima-add-register-definitions.patch
new file mode 100644
index 000000000..78311ffed
--- /dev/null
+++ b/freed-ora/current/F-12/sky2-optima-add-register-definitions.patch
@@ -0,0 +1,281 @@
+From: Stephen Hemminger <shemminger@vyatta.com>
+Date: Thu, 29 Oct 2009 06:37:06 +0000 (+0000)
+Subject: sky2: add register definitions for new chips
+X-Git-Tag: v2.6.33-rc1~388^2~591
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=e91cd2e65f22a80af87367178bed4957fdc45ecd
+
+sky2: add register definitions for new chips
+
+This adds infrastructure for the newer chip versions and workarounds.
+Extracted from the vendor (GPL) driver.
+
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
+index ed54129..e13da94 100644
+--- a/drivers/net/sky2.h
++++ b/drivers/net/sky2.h
+@@ -16,6 +16,13 @@ enum {
+ PCI_DEV_REG5 = 0x88,
+ PCI_CFG_REG_0 = 0x90,
+ PCI_CFG_REG_1 = 0x94,
++
++ PSM_CONFIG_REG0 = 0x98,
++ PSM_CONFIG_REG1 = 0x9C,
++ PSM_CONFIG_REG2 = 0x160,
++ PSM_CONFIG_REG3 = 0x164,
++ PSM_CONFIG_REG4 = 0x168,
++
+ };
+
+ /* Yukon-2 */
+@@ -48,6 +55,37 @@ enum pci_dev_reg_2 {
+ PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
+ };
+
++/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */
++enum pci_dev_reg_3 {
++ P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
++ P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */
++ P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
++ P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */
++ P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */
++ P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */
++ P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */
++ P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */
++ P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */
++ P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */
++ P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */
++ P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */
++ P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */
++ P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */
++ P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */
++ P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */
++ P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */
++ P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */
++ P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */
++ P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */
++ PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
++ P_CLK_COR_REGS_D0_DIS |
++ P_CLK_COR_LNK1_D0_DIS |
++ P_CLK_MAC_LNK1_D0_DIS |
++ P_CLK_PCI_MST_ARB_DIS |
++ P_CLK_COR_COMMON_DIS |
++ P_CLK_COR_LNK1_BMU_DIS,
++};
++
+ /* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
+ enum pci_dev_reg_4 {
+ /* (Link Training & Status State Machine) */
+@@ -114,7 +152,7 @@ enum pci_dev_reg_5 {
+ P_GAT_PCIE_RX_EL_IDLE,
+ };
+
+-#/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
++/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
+ enum pci_cfg_reg1 {
+ P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
+ /* Bit 23..21: Release Clock on Event */
+@@ -145,6 +183,72 @@ enum pci_cfg_reg1 {
+ P_CF1_ENA_TXBMU_WR_IDLE,
+ };
+
++/* Yukon-Optima */
++enum {
++ PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */
++
++ PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */
++ PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */
++
++ PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */
++
++ PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */
++ PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */
++ PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */
++ PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */
++
++ PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */
++
++ PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */
++ PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */
++};
++
++/* Yukon-Supreme */
++enum {
++ PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */
++
++ PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */
++ PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */
++ PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
++ PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */
++ PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */
++ PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */
++ PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */
++ PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
++ PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
++ PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
++ PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
++ PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
++ PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
++ PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
++ PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
++ PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
++ PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
++ PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
++ PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
++
++ PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
++ PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
++ PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
++ PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
++ PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
++ PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
++ PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
++ PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
++ PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
++ PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
++};
++
++/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
++enum {
++ /* PHY Link Detect Timer */
++ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
++ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
++
++ PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
++ PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
++};
++
+
+ #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_SIG_SYSTEM_ERROR | \
+@@ -197,6 +301,9 @@ enum csr_regs {
+ B2_I2C_IRQ = 0x0168,
+ B2_I2C_SW = 0x016c,
+
++ Y2_PEX_PHY_DATA = 0x0170,
++ Y2_PEX_PHY_ADDR = 0x0172,
++
+ B3_RAM_ADDR = 0x0180,
+ B3_RAM_DATA_LO = 0x0184,
+ B3_RAM_DATA_HI = 0x0188,
+@@ -317,6 +424,10 @@ enum {
+ Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
+ Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
+
++ Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */
++ Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */
++ Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */
++
+ Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
+ Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
+ Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
+@@ -435,6 +546,7 @@ enum {
+ CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
+ CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
+ CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
++ CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
+ };
+ enum yukon_ec_rev {
+ CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
+@@ -459,6 +571,8 @@ enum yukon_ex_rev {
+ };
+ enum yukon_supr_rev {
+ CHIP_REV_YU_SU_A0 = 0,
++ CHIP_REV_YU_SU_B0 = 1,
++ CHIP_REV_YU_SU_B1 = 3,
+ };
+
+
+@@ -513,6 +627,12 @@ enum {
+ TIM_T_STEP = 1<<0, /* Test step */
+ };
+
++/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */
++enum {
++ PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */
++ PEX_DB_ACCESS = 1<<30, /* Access to debug register */
++};
++
+ /* B3_RAM_ADDR 32 bit RAM Address, to read or write */
+ /* Bit 31..19: reserved */
+ #define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
+@@ -754,6 +874,42 @@ enum {
+ BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
+ };
+
++/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
++enum {
++ TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
++ TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
++ TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Paddiing FIX1 Enable */
++ TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Paddiing FIX1 Disable */
++ TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
++ TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
++ TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
++ TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
++
++ TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
++ TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
++ TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
++
++ TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
++ TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
++ TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
++
++ TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
++ TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
++ TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
++
++ TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
++ TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
++ TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
++
++ TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
++ TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
++ TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
++
++ TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
++ TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
++ TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
++};
++
+ /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+ /* PREF_UNIT_CTRL 32 bit Prefetch Control register */
+ enum {
+@@ -1674,6 +1830,12 @@ enum {
+
+ /* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
+ enum {
++ RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */
++ RX_GCLKMAC_OFF = 1<<30,
++
++ RX_STFW_DIS = 1<<29, /* RX Store and Forward Enable */
++ RX_STFW_ENA = 1<<28,
++
+ RX_TRUNC_ON = 1<<27, /* enable packet truncation */
+ RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
+ RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
+@@ -1711,6 +1873,20 @@ enum {
+ GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
+ };
+
++/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
++enum {
++ RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */
++ RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */
++ RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */
++ RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */
++ RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */
++ RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */
++ RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */
++ RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */
++ RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */
++ RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */
++};
++
+ /* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
+ enum {
+ TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */
diff --git a/freed-ora/current/F-12/sky2-optima-fix-tcp-offload.patch b/freed-ora/current/F-12/sky2-optima-fix-tcp-offload.patch
new file mode 100644
index 000000000..d0f78ed58
--- /dev/null
+++ b/freed-ora/current/F-12/sky2-optima-fix-tcp-offload.patch
@@ -0,0 +1,33 @@
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 3 Dec 2009 05:12:01 +0000 (+0000)
+Subject: net: Fix Yukon-2 Optima TCP offload setup
+X-Git-Tag: v2.6.33-rc1~59^2~39
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=b338682dc5c20e8ff986e58407bdb6e3a3e3f0a3
+
+net: Fix Yukon-2 Optima TCP offload setup
+
+Fix the TCP offload setup for Yukon-2 Optima.
+It requires the SKY2_HW_NEW_LE flag, unlike Ultra 2.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 3943d89..050e6b5 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -2968,8 +2968,13 @@ static int __devinit sky2_init(struct sky2_hw *hw)
+ break;
+
+ case CHIP_ID_YUKON_UL_2:
++ hw->flags = SKY2_HW_GIGABIT
++ | SKY2_HW_ADV_POWER_CTL;
++ break;
++
+ case CHIP_ID_YUKON_OPT:
+ hw->flags = SKY2_HW_GIGABIT
++ | SKY2_HW_NEW_LE
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
diff --git a/freed-ora/current/F-12/sky2-optima-print-chip-name.patch b/freed-ora/current/F-12/sky2-optima-print-chip-name.patch
new file mode 100644
index 000000000..0a2e148f8
--- /dev/null
+++ b/freed-ora/current/F-12/sky2-optima-print-chip-name.patch
@@ -0,0 +1,27 @@
+From: stephen hemminger <shemminger@vyatta.com>
+Date: Mon, 14 Dec 2009 08:33:47 +0000 (+0000)
+Subject: sky2: print Optima chip name
+X-Git-Tag: v2.6.33-rc1~59^2~9
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=dae3a5112d258764cad9e48439ca7dd05c2edca1
+
+sky2: print Optima chip name
+
+Off by one in name lookup makes Optima display as (chip 0xbc)
+
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 89a05d6..9431f64 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -4530,7 +4530,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
+ "Optima", /* 0xbc */
+ };
+
+- if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
++ if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OPT)
+ strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
+ else
+ snprintf(buf, sz, "(chip %#x)", chipid);
diff --git a/freed-ora/current/F-12/sky2-optima-support.patch b/freed-ora/current/F-12/sky2-optima-support.patch
new file mode 100644
index 000000000..f56f9baeb
--- /dev/null
+++ b/freed-ora/current/F-12/sky2-optima-support.patch
@@ -0,0 +1,157 @@
+From: Stephen Hemminger <shemminger@vyatta.com>
+Date: Thu, 29 Oct 2009 06:37:09 +0000 (+0000)
+Subject: sky2: 88E8059 support
+X-Git-Tag: v2.6.33-rc1~388^2~588
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0f5aac7070a01ec757ed243febe4fff7c944c4d2
+
+sky2: 88E8059 support
+
+Tentative support for newer Marvell hardware including
+the Yukon-2 Optima chip. Do not have hardware to test this yet,
+code is based on vendor driver.
+
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 3387a2f..53cce74 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -140,6 +140,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { 0 }
+ };
+
+@@ -603,6 +604,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
+ /* apply workaround for integrated resistors calibration */
+ gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
+ gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
++ } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
++ /* apply fixes in PHY AFE */
++ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
++
++ /* apply RDAC termination workaround */
++ gm_phy_write(hw, port, 24, 0x2800);
++ gm_phy_write(hw, port, 23, 0x2001);
++
++ /* set page register back to 0 */
++ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
+ hw->chip_id < CHIP_ID_YUKON_SUPR) {
+ /* no effect on Yukon-XL */
+@@ -2127,6 +2138,25 @@ out:
+ spin_unlock(&sky2->phy_lock);
+ }
+
++/* Special quick link interrupt (Yukon-2 Optima only) */
++static void sky2_qlink_intr(struct sky2_hw *hw)
++{
++ struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
++ u32 imask;
++ u16 phy;
++
++ /* disable irq */
++ imask = sky2_read32(hw, B0_IMSK);
++ imask &= ~Y2_IS_PHY_QLNK;
++ sky2_write32(hw, B0_IMSK, imask);
++
++ /* reset PHY Link Detect */
++ phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
++ sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
++
++ sky2_link_up(sky2);
++}
++
+ /* Transmit timeout is only called if we are running, carrier is up
+ * and tx queue is full (stopped).
+ */
+@@ -2796,6 +2826,9 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
+
++ if (status & Y2_IS_PHY_QLNK)
++ sky2_qlink_intr(hw);
++
+ while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
+ work_done += sky2_status_intr(hw, work_limit - work_done, idx);
+
+@@ -2845,6 +2878,7 @@ static u32 sky2_mhz(const struct sky2_hw *hw)
+ case CHIP_ID_YUKON_EX:
+ case CHIP_ID_YUKON_SUPR:
+ case CHIP_ID_YUKON_UL_2:
++ case CHIP_ID_YUKON_OPT:
+ return 125;
+
+ case CHIP_ID_YUKON_FE:
+@@ -2934,6 +2968,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
+ break;
+
+ case CHIP_ID_YUKON_UL_2:
++ case CHIP_ID_YUKON_OPT:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+@@ -3024,6 +3059,46 @@ static void sky2_reset(struct sky2_hw *hw)
+ sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
+ }
+
++ if (hw->chip_id == CHIP_ID_YUKON_OPT) {
++ u16 reg;
++ u32 msk;
++
++ if (hw->chip_rev == 0) {
++ /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
++ sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
++
++ /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
++ reg = 10;
++ } else {
++ /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
++ reg = 3;
++ }
++
++ reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
++
++ /* reset PHY Link Detect */
++ sky2_pci_write16(hw, PSM_CONFIG_REG4,
++ reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
++ sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
++
++
++ /* enable PHY Quick Link */
++ msk = sky2_read32(hw, B0_IMSK);
++ msk |= Y2_IS_PHY_QLNK;
++ sky2_write32(hw, B0_IMSK, msk);
++
++ /* check if PSMv2 was running before */
++ reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
++ if (reg & PCI_EXP_LNKCTL_ASPMC) {
++ int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++ /* restore the PCIe Link Control register */
++ sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
++ }
++
++ /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
++ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
++ }
++
+ /* Clear I2C IRQ noise */
+ sky2_write32(hw, B2_I2C_IRQ, 1);
+
+@@ -4442,9 +4517,11 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
+ "FE+", /* 0xb8 */
+ "Supreme", /* 0xb9 */
+ "UL 2", /* 0xba */
++ "Unknown", /* 0xbb */
++ "Optima", /* 0xbc */
+ };
+
+- if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_UL_2)
++ if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
+ strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
+ else
+ snprintf(buf, sz, "(chip %#x)", chipid);
diff --git a/freed-ora/current/F-12/sources b/freed-ora/current/F-12/sources
index dd4794b9e..d9cebf418 100644
--- a/freed-ora/current/F-12/sources
+++ b/freed-ora/current/F-12/sources
@@ -1,2 +1,2 @@
82f8fc14bf087bbb15ae5723533c56ee linux-2.6.32-libre1.tar.bz2
-95fbc761d0430491de14876b588c102d patch-libre-2.6.32.16.bz2
+196fc65cc1d4273b0f4a0edf899ac44f patch-libre-2.6.32.18.bz2
diff --git a/freed-ora/current/F-12/ssb_check_for_sprom.patch b/freed-ora/current/F-12/ssb_check_for_sprom.patch
new file mode 100644
index 000000000..7df784f85
--- /dev/null
+++ b/freed-ora/current/F-12/ssb_check_for_sprom.patch
@@ -0,0 +1,185 @@
+From 380bed7aa858cbe2d4eeb783e2bed7d01828518d Mon Sep 17 00:00:00 2001
+From: John W. Linville <linville@tuxdriver.com>
+Date: Fri, 19 Mar 2010 14:58:01 -0400
+Subject: [PATCH v4] ssb: do not read SPROM if it does not exist
+
+Attempting to read registers that don't exist on the SSB bus can cause
+hangs on some boxes. At least some b43 devices are 'in the wild' that
+don't have SPROMs at all. When the SSB bus support loads, it attempts
+to read these (non-existent) SPROMs and causes hard hangs on the box --
+no console output, etc.
+
+This patch adds some intelligence to determine whether or not the SPROM
+is present before attempting to read it. This avoids those hard hangs
+on those devices with no SPROM attached to their SSB bus. The
+SSB-attached devices (e.g. b43, et al.) won't work, but at least the box
+will survive to test further patches. :-)
+
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Cc: Larry Finger <Larry.Finger@lwfinger.net>
+Cc: Michael Buesch <mb@bu3sch.de>
+Cc: stable@kernel.org
+---
+Version 4, move read of ChipCommon status register to ssb_chipcommon_init
+Version 3, add missing semi-colon... :-(
+Version 2, check the correct place for ChipCommon core revision... :-)
+
+ drivers/ssb/driver_chipcommon.c | 3 +++
+ drivers/ssb/pci.c | 3 +++
+ drivers/ssb/sprom.c | 22 ++++++++++++++++++++++
+ include/linux/ssb/ssb.h | 3 +++
+ include/linux/ssb/ssb_driver_chipcommon.h | 15 +++++++++++++++
+ 5 files changed, 46 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
+index 9681536..6cf288d 100644
+--- a/drivers/ssb/driver_chipcommon.c
++++ b/drivers/ssb/driver_chipcommon.c
+@@ -233,6 +233,9 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
+ {
+ if (!cc->dev)
+ return; /* We don't have a ChipCommon */
++ if (cc->dev->id.revision >= 11) {
++ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
++ }
+ ssb_pmu_init(cc);
+ chipco_powercontrol_init(cc);
+ ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
+diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
+index 9e50896..2f7b16d 100644
+--- a/drivers/ssb/pci.c
++++ b/drivers/ssb/pci.c
+@@ -620,6 +620,9 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
+ int err = -ENOMEM;
+ u16 *buf;
+
++ if (!ssb_is_sprom_available(bus))
++ return -ENODEV;
++
+ buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ goto out;
+diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
+index d0e6762..55eb9b0 100644
+--- a/drivers/ssb/sprom.c
++++ b/drivers/ssb/sprom.c
+@@ -175,3 +175,25 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
+ {
+ return fallback_sprom;
+ }
++
++bool ssb_is_sprom_available(struct ssb_bus *bus)
++{
++ /* status register only exists on chipcommon rev >= 11 */
++ if (bus->chipco.dev->id.revision < 11)
++ return true;
++
++ switch (bus->chip_id) {
++ case 0x4312:
++ return SSB_CHIPCO_CHST_4312_SPROM_PRESENT(bus->chipco.status);
++ case 0x4322:
++ return SSB_CHIPCO_CHST_4322_SPROM_PRESENT(bus->chipco.status);
++ case 0x4325:
++ return SSB_CHIPCO_CHST_4325_SPROM_PRESENT(bus->chipco.status);
++ default:
++ break;
++ }
++ if (bus->chipco.dev->id.revision >= 31)
++ return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
++
++ return true;
++}
+diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
+index 24f9885..3b4da23 100644
+--- a/include/linux/ssb/ssb.h
++++ b/include/linux/ssb/ssb.h
+@@ -394,6 +394,9 @@ extern int ssb_bus_sdiobus_register(struct ssb_bus *bus,
+
+ extern void ssb_bus_unregister(struct ssb_bus *bus);
+
++/* Does the device have an SPROM? */
++extern bool ssb_is_sprom_available(struct ssb_bus *bus);
++
+ /* Set a fallback SPROM.
+ * See kdoc at the function definition for complete documentation. */
+ extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
+diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
+index 4e27acf..2cdf249 100644
+--- a/include/linux/ssb/ssb_driver_chipcommon.h
++++ b/include/linux/ssb/ssb_driver_chipcommon.h
+@@ -53,6 +53,7 @@
+ #define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */
+ #define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */
+ #define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */
++#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */
+ #define SSB_CHIPCO_CORECTL 0x0008
+ #define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
+ #define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
+@@ -385,6 +386,7 @@
+
+
+ /** Chip specific Chip-Status register contents. */
++#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */
+ #define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003
+ #define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
+ #define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
+@@ -398,6 +400,18 @@
+ #define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4
+ #define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */
+
++/** Macros to determine SPROM presence based on Chip-Status register. */
++#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \
++ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
++ SSB_CHIPCO_CHST_4325_OTP_SEL)
++#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \
++ (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS)
++#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \
++ (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
++ SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \
++ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
++ SSB_CHIPCO_CHST_4325_OTP_SEL))
++
+
+
+ /** Clockcontrol masks and values **/
+@@ -564,6 +578,7 @@ struct ssb_chipcommon_pmu {
+ struct ssb_chipcommon {
+ struct ssb_device *dev;
+ u32 capabilities;
++ u32 status;
+ /* Fast Powerup Delay constant */
+ u16 fast_pwrup_delay;
+ struct ssb_chipcommon_pmu pmu;
+--
+1.6.2.5
+
+From ec032742062ad1b01dfe75cfccdbc5b850837c23 Mon Sep 17 00:00:00 2001
+From: John W. Linville <linville@tuxdriver.com>
+Date: Tue, 30 Mar 2010 13:47:39 -0400
+Subject: [PATCH] ssb: avoid null ptr deref in ssb_is_sprom_available
+
+Some older devices don't have chipcommon, but they do have SPROM.
+
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+---
+ drivers/ssb/sprom.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
+index 55eb9b0..874d8f1 100644
+--- a/drivers/ssb/sprom.c
++++ b/drivers/ssb/sprom.c
+@@ -178,6 +178,10 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
+
+ bool ssb_is_sprom_available(struct ssb_bus *bus)
+ {
++ /* some older devices don't have chipcommon, but they have sprom */
++ if (!bus->chipco.dev)
++ return true;
++
+ /* status register only exists on chipcommon rev >= 11 */
+ if (bus->chipco.dev->id.revision < 11)
+ return true;
+--
+1.6.2.5
+
diff --git a/freed-ora/current/F-12/thinkpad-acpi-add-x100e.patch b/freed-ora/current/F-12/thinkpad-acpi-add-x100e.patch
new file mode 100644
index 000000000..0ccd06223
--- /dev/null
+++ b/freed-ora/current/F-12/thinkpad-acpi-add-x100e.patch
@@ -0,0 +1,11 @@
+diff -up linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c.mjg linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c
+--- linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c.mjg 2010-04-21 10:02:53.658034129 -0400
++++ linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-04-21 10:03:30.402030108 -0400
+@@ -491,6 +491,7 @@ TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA.
+ "\\_SB.PCI0.ISA.EC", /* A21e, A2xm/p, T20-22, X20-21 */
+ "\\_SB.PCI0.AD4S.EC0", /* i1400, R30 */
+ "\\_SB.PCI0.ICH3.EC0", /* R31 */
++ "\\_SB.PCI0.LPC0.EC", /* X100e */
+ "\\_SB.PCI0.LPC.EC", /* all others */
+ );
+
diff --git a/freed-ora/current/F-12/thinkpad-acpi-fix-backlight.patch b/freed-ora/current/F-12/thinkpad-acpi-fix-backlight.patch
new file mode 100644
index 000000000..163902c9d
--- /dev/null
+++ b/freed-ora/current/F-12/thinkpad-acpi-fix-backlight.patch
@@ -0,0 +1,56 @@
+diff -up linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c.orig linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c
+--- linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c.orig 2010-05-17 16:18:05.748224844 -0400
++++ linux-2.6.32.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-05-17 16:25:58.299199699 -0400
+@@ -3387,7 +3387,7 @@ static int __init hotkey_init(struct ibm
+ /* update bright_acpimode... */
+ tpacpi_check_std_acpi_brightness_support();
+
+- if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
++ if (acpi_video_backlight_support()) {
+ printk(TPACPI_INFO
+ "This ThinkPad has standard ACPI backlight "
+ "brightness control, supported by the ACPI "
+@@ -6178,26 +6178,24 @@ static int __init brightness_init(struct
+ * going to publish a backlight interface
+ */
+ b = tpacpi_check_std_acpi_brightness_support();
+- if (b > 0) {
+
+- if (acpi_video_backlight_support()) {
+- if (brightness_enable > 1) {
+- printk(TPACPI_NOTICE
+- "Standard ACPI backlight interface "
+- "available, not loading native one.\n");
+- return 1;
+- } else if (brightness_enable == 1) {
+- printk(TPACPI_NOTICE
+- "Backlight control force enabled, even if standard "
+- "ACPI backlight interface is available\n");
+- }
+- } else {
+- if (brightness_enable > 1) {
+- printk(TPACPI_NOTICE
+- "Standard ACPI backlight interface not "
+- "available, thinkpad_acpi native "
+- "brightness control enabled\n");
+- }
++ if (acpi_video_backlight_support()) {
++ if (brightness_enable > 1) {
++ printk(TPACPI_NOTICE
++ "Standard ACPI backlight interface "
++ "available, not loading native one.\n");
++ return 1;
++ } else if (brightness_enable == 1) {
++ printk(TPACPI_NOTICE
++ "Backlight control force enabled, even if standard "
++ "ACPI backlight interface is available\n");
++ }
++ } else {
++ if (brightness_enable > 1) {
++ printk(TPACPI_NOTICE
++ "Standard ACPI backlight interface not "
++ "available, thinkpad_acpi native "
++ "brightness control enabled\n");
+ }
+ }
+
diff --git a/freed-ora/current/F-12/upstream b/freed-ora/current/F-12/upstream
deleted file mode 100644
index 3b023368d..000000000
--- a/freed-ora/current/F-12/upstream
+++ /dev/null
@@ -1,2 +0,0 @@
-linux-2.6.32-libre1.tar.bz2
-patch-libre-2.6.32.16.bz2
diff --git a/freed-ora/current/F-12/upstream-key.gpg b/freed-ora/current/F-12/upstream-key.gpg
deleted file mode 100644
index 601b65d11..000000000
--- a/freed-ora/current/F-12/upstream-key.gpg
+++ /dev/null
@@ -1,1597 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.4.9 (GNU/Linux)
-
-mQGiBDdxLb0RBADNEdWVwbqMQmY9oZQLHIXttEG69VoO/RdPcWcYDHLODTA63y3x
-d8apWKmYmovhMngQ5OPJ8gOmWuH5iIlzE+a30NqUrisMq2rl019uT2gfDV7gfFcP
-Mj3bwK/s+ANcLViZcVRKHEaDGQ0AJ6LBqYKwqbh1dRvgYZcfEfwdeIUwGQCg//tz
-xxE9JTY7/Utu5AMOTWARi88D/iZXkLUvFylh9p+nON0bJlsVGJ7juaaAbZeKwfy8
-ftAg1rJlKlzW89dHKzGQ8t/d2nklN2agujfHTSlOnaXBxDzCXioUL2CjRIBk9ZAA
-gNw6z0NbUfm0ZUXIxAW8mQ62E4wWffJ+nd2dPpnnep7vTqEE5U1sTLr0xsf0Pqt4
-gpnoBADIwFUtlj2+CWt0O+51b496su8KwqzICgID58k6bYUzD/0zmx66qjQtKZA6
-fN/3osGrrpFM/HN5ywWrUp61+PZmOPBuEG0EJappw9VVJJuGXcT9VTiOMcISlq3e
-4QKgFRtU5dZnfYpm6fwopdrWOf/GvoezzBlqm+6UweqGrYlmWbQpQWxleGFuZHJl
-IE9saXZhIDxvbGl2YUBsc2QuaWMudW5pY2FtcC5icj6IYQQQEQIAGQQLBwMCAxUC
-AwMWAgECHgEFAkL7m+4CGQEAEgdlR1BHAAEBCRBSNIRd8rkg9fzZAJ9pp//MY0U8
-v5EXPRzilZAKFZ8N/gCfav0FrEyFKUbgs4xW2jpxTWYZQD2IRgQSEQIABgUCPs5r
-vQAKCRBVlt0M6b9lPQC2AJ49PxorJQxa47uV1FnEhRpSFETBEgCfbu/hII1cukLE
-rY/vgqgbyfzUIlCIRgQTEQIABgUCPtE3WQAKCRBF5ZBo+Ru3n4HYAJ0cOlok+5M/
-vVwWnjCDItyaILtt3gCgi+vqcAH3QpJksYjnGp22wemZCDOIRgQTEQIABgUCPtF4
-zAAKCRA4mlY8wnKhJp2yAJ9gVcPN/XRliYg/JqPXImqVMeLbCwCePZ9aEeeIlG14
-196YRYtCdFGZOt+IRgQTEQIABgUCPtJV/gAKCRC3zpsZN6GHxh4bAJ9hbzT8VPOl
-gxjakfe1kvGpP0/mvgCbBsdrhKQkpS0yu57mhwCAaSuHb8KIRgQSEQIABgUCPtKb
-OwAKCRAiGMgejnwD/3v8AJ45h/I94jzYXx+RpBrT5AwH15lClgCeK7PfCkk6m8D9
-uOrqbryi51a7ClaIRgQTEQIABgUCPtOgswAKCRAuLPZ7d5amC1b4AJ0YRyrgWsKQ
-HKJx04GaWB5ppMOIegCgwHtFnKKdTk7b9IoTrfA1GFgxhqqIRgQTEQIABgUCPtQT
-LwAKCRBJRaU313tD+0/5AKCfAaW3cDJSOwiUKrCDyWwXMW5iOQCePus6Ddm8z5Gc
-oRTbDYoX8M8+AlWIRgQSEQIABgUCPtWNGwAKCRBQuyl0LVmn0rteAJ4+RutOsd1E
-aisW2tzXG20rfTJRvwCgve5ZzWcpbUUJsScRPcp8Qns2WI6IRgQTEQIABgUCPtZk
-6AAKCRBuA49e4KODd19rAKDF5mSNZ1vX9bTOj0vj+UlglP9kzwCgwOEEirq54q/t
-PBeqnXD0h5wmsVeIRgQTEQIABgUCPta07QAKCRAk8T4/5owAkhneAKC2nssRC4T5
-z4Y4rsk50FWDPekK5ACg8hkDAVF/eXZ0vSNATBb+zJClM2SIRgQTEQIABgUCPtgW
-bwAKCRBZUSdMgY/jQDEjAKC09vUgM6J4SmdWu3TkVMp5KBKKSQCfSf9KLdMoW/dA
-n5gh3QGOZBkUl+2IRgQTEQIABgUCPtlL5QAKCRBRxjMgeX3HXyL3AKCmwX2/aDwo
-o/zSOfkS1sNlLLO1dQCfeg9vmVUdW1xoiww69X18UfsnVomIRgQQEQIABgUCPuJO
-BQAKCRBxc32m+MTRT5LiAJwMycDHMUDBZkFd0DJePZtvbXCYJwCdFFDi/M4+fYRy
-HrKl9lB8lS5RioqIRgQTEQIABgUCPwEkKQAKCRAwt65wR936hZ4ZAJ4p5DGc1zMK
-OKYlb7fOasjbx7eSJwCcCzuYgZqAxF/+o3YcXt7zgIHnqIyIRgQTEQIABgUCPwEx
-qQAKCRBdbP1mfoXQM5scAKDTSqU5iA1DskAtIkQrvIpdW+kucgCfS2PrN0Ca6FH2
-Os3HUMdOjrTWzsaISwQQEQIACwUCN+7IagQLAwECAAoJEFI0hF3yuSD12Z4AoOM9
-xthHRcIzCcuhJL1Enk8SGP7cAKCw8DyIP4PF/UNIp5pWSFO6ciOGiojcBBMBAgAG
-BQJAGjk1AAoJEMKjXUokOhMpiaEF/R8jNxqcKhCBedDgILJ7zCDCqFAH6k7AJucA
-1duvsWcy01nHBFM4Z3aNGaWsMLXLJdolpCIDdFAB/Ms4qJLMgrE9wxxS34AT28TW
-zzZ/CKEQnAIjfp6rTrAT99zI3u9ETm9a9PdI/1JvVc8dmOFmrydLYCcDV35H6xly
-ewhWqFNgEhxISBcY0/k+cZnkpALkNK9Ez1TVkVKKTwPQTCD7gsyJkSlI4Bkpd7Wy
-2MmojzImIz7E6FfQW9StZe7BMd8pMIhGBBMRAgAGBQJAGjl3AAoJEAvgKygRZSHZ
-D60AoKPClObX5d6QLxbTviv7pUwvcTtNAKCVcXweCV02rtRu3mDtCMVmbgmOk4hG
-BBIRAgAGBQJCn1n6AAoJEFjalq8LkBFBvBUAoKO/j0akDS9wS0/a23G7huTQaRZu
-AJ0YoXtfjZ8gM/RM3xgEVmVHsx2im4hWBBARAgAWBAsHAwIDFQIDAxYCAQIeAQUC
-N+7IawAKCRBSNIRd8rkg9a47AKDhabws8MOMgKPw/zauh5HjsFordwCgiEE3wI2I
-a5H/2mMC2DDiGhooz/+IRgQTEQIABgUCQv15aQAKCRA8Y8o/oLPoizagAJ9GcARJ
-3OSX1p/HifHocXAU+k7PNQCdHqOSnBkRDUvV///IcZxUTU4sU9OIRgQQEQIABgUC
-Q4MmnAAKCRC90FGoFrb4TZ8ZAJ9XdJNnr0eTvKZPVQH6xsdz0cQ7tACgkOVAQ9+l
-WnhY3UI0g6pW0nRJz7KIRgQTEQIABgUCRKQUqwAKCRBs20NscgSFJexAAKDq2hkr
-cku2enQyIwBGIRxrltDhlgCfTX3xqqme3bXyL57gXE3DOJLdmXKIRgQQEQIABgUC
-RKQftQAKCRBTn4yvDOJxHdVlAKCfN/+yDijOyAkV/ND1n+/ATbAZBQCcCbhCjKP4
-xXjFu+V/7+DTIt+ZTmyIRgQQEQIABgUCRbAMXQAKCRChmbGhUlG0YiJXAJ90gnZB
-i2lXvCESGXmA6RZ9PgdrNgCdGwWmS5z+AIgmb82UVvLzxPB6VViIRgQQEQIABgUC
-ROdwuAAKCRCnyMOy9x7fHDtLAJoDSswl+RkL1mlBT90xzeM5/9lTrgCgir4ktL68
-h1yeVNortjQjBO6aorKIRgQQEQIABgUCRxzTsgAKCRDHJIY3TSCJ24jOAJ9FHip0
-QFAZeOU8kkqtjmX/0y853ACfb785oqioATWVSB0nVoAs0K7W10WIRgQQEQIABgUC
-RxzT/QAKCRB2nZNyaMUfOLeCAJ4z0YiqXO+hO/ma5remZUZeCLKoywCgwgyP0aW2
-I6ndvA47qsFA8c4xkwSIRgQQEQIABgUCRxzpnQAKCRDaKMI6ef9EdFp+AJ9BuizH
-R/ogJNoW7t9q8lU7xYVDiwCgyTZYWLvQR4QoQKMMdhqP3XkC3ueIRgQQEQIABgUC
-RyHmEwAKCRA/3qp6T+sDoVq/AJ9OqM1umFB29Ein8ZTwinTJLMUHQQCgkMI5U14X
-SmdUwoEKjQaGU+ZHTMSIRgQTEQIABgUCRyHraQAKCRBPq0nLRJVA8gLZAJ9l8keH
-Nr9qOBenTqvglb0b3Jo7gQCfSFKeU3y5szTqBZ6/IuVVbwy+R1a0I0FsZXhhbmRy
-ZSBPbGl2YSA8YW9saXZhQHJlZGhhdC5jb20+iFwEExECABwCGwMCHgECF4AECwcD
-AgMVAgMDFgIBBQJC+5vjAAoJEFI0hF3yuSD1kh8AoI711CdTLQu0cdTsXaKSnzrE
-eHwKAKCILV5n1ju9g4O+o0sj9n9s7AHiuIhGBBIRAgAGBQI+zmu/AAoJEFWW3Qzp
-v2U95wEAn0ZG0sMoZooRJUgwJusllvYtBYOsAJ9+C8x4Xb0yS3utn778PAokwbTA
-8ohGBBMRAgAGBQI+0TdcAAoJEEXlkGj5G7efgpQAoMVRs2+0pyuuWhDcf2FZIrgV
-Rck5AKCSqbKmhWJfTeqfYPGI8ttQ4CuipYhGBBMRAgAGBQI+0T6MAAoJEMXAxcch
-jRjX8PoAoJn9OmLvAkfvjWTgLlsTlF4lWq+yAJ4nrM3p7veOe4i6Fo3CGFS3jsl8
-UIhGBBMRAgAGBQI+0XjKAAoJEDiaVjzCcqEmFD4An3gbPv/+xGgiKtcEHAo4or9j
-qq85AJ4jlCnPhgpO8q1ZQFKGXNwKtmBewIhGBBMRAgAGBQI+0lYEAAoJELfOmxk3
-oYfGVpkAn1ntZR8Bae9zNYRDcpnz7maJJHkvAJ9C+Td5kIeX1zWaiwgKAGO1eeQd
-RIhGBBIRAgAGBQI+0pspAAoJECIYyB6OfAP/qckAniPS0ExpKR1KyBTV2bjQ9XLK
-6pE2AJ9Z4BkWv3EG/orD0E/Cno7emb6zmohGBBMRAgAGBQI+06CtAAoJEC4s9nt3
-lqYL4J4AoMtYwdwhRz02c3Bf650trPv4FjwpAKDcrf3OKg3EBKGRtmNB0fKXJqbR
-HohGBBMRAgAGBQI+1BM2AAoJEElFpTfXe0P7ydYAoIXJAeSi5o/vjQLjb+K++LN/
-GWFKAJ4wuN3RxnIaKhg4cmeo92Wp319a+4hGBBIRAgAGBQI+1Y0dAAoJEFC7KXQt
-WafSEpwAoMfUs9W0HPwHJgTFYJQpHOQY7xvmAJ0X5rB2neFaD1PmVOPCS5TPAPpe
-+4hGBBMRAgAGBQI+1mTkAAoJEG4Dj17go4N3ffQAoJAp2XtCrSHU35qGthJCVNs/
-muw6AKC/8VkE53YTzfGF3xXLB7qDgIuFB4hGBBIRAgAGBQI+1iGFAAoJECn45GVn
-iJZfrlQAnRP1qezPfEQJgxkwpX5aZIY/wEZOAKCSWaUSrrvcESnqWmGYLwFqPH+Y
-VYhGBBMRAgAGBQI+1rTnAAoJECTxPj/mjACSdA4AoIQ8LbBoQaw8AGe1Hcsb+VlC
-RpCyAJ9/10W3baurjoIyV+S6ThFwIs7lm4hGBBMRAgAGBQI+2BZtAAoJEFlRJ0yB
-j+NA7LcAoMglf4LSHbVKrWQXsXgZgBBc/xoDAJ4219gt4pXq6Sr5a/hl29R5mrWh
-bIhGBBMRAgAGBQI+2JjkAAoJEHV+VfRE0xInmAAAoIuN7Xonh+7Gn5oGlLPt2Snk
-KaAPAJ4xC+PY17nlPnI0CiVY/DnbISta2IhGBBMRAgAGBQI+2UvfAAoJEFHGMyB5
-fcdfP68AoJTDLS2hIfbm8C0KUgqAkEtZvhThAJ0ZK0RMCedjOMU8nBO9Mve5u/hi
-7ohGBBARAgAGBQI+4k4QAAoJEHFzfab4xNFPuewAoNiuAOARLz6/XnwPhkwbNttj
-OAsbAKDfRTx4R0AUR2t7QeGiw6UJo4yJfYhGBBMRAgAGBQI+44OQAAoJEN5HUcxj
-jSIa2RwAn111QjcFqc5c4o2qA6DC6Ph6OMKvAJ9mn1t8Pn3IBb8oj94o8MOOcdZl
-JIhGBBMRAgAGBQI/ASQtAAoJEDC3rnBH3fqFPpQAn2VSgDpf3H/X1UHVbBWPahvs
-4r7XAJ0Rb5gdKgg+1tFoDE6ZrIbAAUxUb4hGBBMRAgAGBQI/ATGxAAoJEF1s/WZ+
-hdAzXHsAoKKa11+JJkZg2uGCCN00tGKYAD+VAKCbLR1ze8bz8aUezQj8nyGtRis6
-QYhGBBMRAgAGBQI+58+7AAoJEGP76cgpbgh/c2kAoOUedsUDpdHk28adJdsvLzrB
-TmOYAKDP+vxxpDjex7AzVMkSoZiC/WyC/4hfBBMRAgAfAhsDBAsHAwIDFQIDAxYC
-AQIeAQIXgAIZAQUCPrrQRAAKCRBSNIRd8rkg9e9MAJ0VEU0k5MFqoRqNZ68SbJol
-31YWsgCeIzQUHy8fgrb6V7uAYEyC74pDKo6I3AQTAQIABgUCQBo5LQAKCRDCo11K
-JDoTKcWvBf9eWyn/JQH1CzUu93HMnTkdHzAc40qa1m6xSfpkIuKCUcP5akYqZLPd
-oqMc/aifkbtCN50OMe1qOb6OR85qK0UiodbNwfIL7poiXKUyrt6fSI8JvVf5NJ8+
-nkP8WYqc2vBQkWq/4sH+rL6+NGsMicH6l6PJQlMk7c77pLJfyum+uNovW2EkoLmR
-czfvLuTlaGFxLvxipy64VywTpNwPMgbtMhAgej2kptX5wUOuih5W8UIk59YpLtJb
-RXpYbTFnk42IRgQTEQIABgUCQBo5cQAKCRAL4CsoEWUh2Yj/AJ9kwKD3mTLR1KI6
-Aq9M3RZ56WgTxACdFKbTSS/Ybr5UFvxvAj5z4PVV+aeIRgQQEQIABgUCQJlVUAAK
-CRAigZHBVn4sF4DHAJ4pgl30r8bd0dLCp+6beu04+GIcvgCfawFrMha887UQG1O+
-U4Yk7vYwNh2IRgQSEQIABgUCQp9Z7QAKCRBY2pavC5ARQTOuAKDyOFSvOz6l3El9
-VXVf8vc5hxkL1ACg9WgwUB+96A8vujy20WPS2SIwAxeIXwQTEQIAHwIbAwIeAQIX
-gAIZAQQLBwMCAxUCAwMWAgEFAj660EUACgkQUjSEXfK5IPW+BgCfbUWOBWK2dvR2
-FktyFIgW2d76+GQAn1HP3/aLaoO4vQyFufOUODhZ0KzUiEYEExECAAYFAkL9eWsA
-CgkQPGPKP6Cz6Iu2SQCdFhwYbaKO60weC0koSvvfhpJGmgMAoLsfCg1WJOSjiW5a
-Cb0t21suPwRGiEYEEBECAAYFAkODJqMACgkQvdBRqBa2+E1D8wCgj3T9Xsp9XRAz
-r3BNi8cxORG8AVwAn0wD6VajOqFKN4IiFLKR1uCf5RpgiEYEEBECAAYFAkODJpwA
-CgkQvdBRqBa2+E2fGQCfV3STZ69Hk7ymT1UB+sbHc9HEO7QAoJDlQEPfpVp4WN1C
-NIOqVtJ0Sc+yiEYEExECAAYFAkSkFKoACgkQbNtDbHIEhSUHawCg0/2c6QEz9vX6
-4kV/+ERNlrYNDnUAoLuOGVjZ2a8qCtNEDj7faGoqN42aiEYEEBECAAYFAkSkH+MA
-CgkQU5+MrwzicR2zawCfUM9l/oLZPmy/kWTwDph4Wjo3qAIAn0jL7XYWIsN4X3xn
-x5FeKzdXYTuXiEYEEBECAAYFAkWwDGIACgkQoZmxoVJRtGK/fACfbs9Ay62cqhAS
-3Fyh8HlEjEg5sy4An2hasNmdWmqrZvC7blQNyIui2VaaiEYEEBECAAYFAkXpNlEA
-CgkQ2ijCOnn/RHR97QCdFHPKS7TJCylPb1fog5ft1ixxW3EAoKwIkkd4OS2odIcV
-ezXGsy5c0/IsiEYEEBECAAYFAkTncMIACgkQp8jDsvce3xznJQCdHA8mAXzu1fyB
-h1fn+kN8q+wbprAAn2nGLI6vRHUFpTBgAJkXGOvVsvz9iEYEEBECAAYFAkcc07IA
-CgkQxySGN00gidvYSQCZAWBqwCU71jRay58AQzaqqxXV2lQAn0uw2x/x6Jz/w/Ln
-49luPpFVMpR6iEYEEBECAAYFAkcc0/0ACgkQdp2TcmjFHziIQwCeKBs8y63sP6yo
-ohAS/MNzcMEAtpAAoIjgO5qX3WHUOYz2ECSVv+1FxO+liEYEEBECAAYFAkch5hMA
-CgkQP96qek/rA6FDSgCeMefTqLce4jS6MwJChthDyFGwg78AnifMWJCUdXbvanLS
-1PtmUsBLtEeWiEYEExECAAYFAkch62kACgkQT6tJy0SVQPJr5wCgoTwPeCai3kR+
-2gj1hhwV2PgN0NwAn1H62tavcU5VEvSaMQ+bYZ10ga7FtCZBbGV4YW5kcmUgT2xp
-dmEgPG9saXZhQGRjYy51bmljYW1wLmJyPohhBBMRAgAZAheABAsHAwIDFQIDAxYC
-AQIeAQUCN+7JDAASB2VHUEcAAQEJEFI0hF3yuSD17WYAoJE7e1hQX7YLJKfNXsQs
-iahFLp6dAKDbTFzgUNRcFxni15KQcSFVCHzRkYhGBBIRAgAGBQI+zmu/AAoJEFWW
-3Qzpv2U953MAnihBnrd6piAdQxg+Zlm1cX6/b3TGAJ4zqlFANOsKXrhH9wMUjUaK
-xkxZvYhGBBMRAgAGBQI+0TddAAoJEEXlkGj5G7efUFMAn1/YqarA3T1+YOgP6Ukd
-5Hiy88aXAJ9sLCIGXEIMDf4RnLANVk5LAY8TBYhGBBMRAgAGBQI+0XjMAAoJEDia
-VjzCcqEmYDQAnjKeRkWey4ItYza29EJ0R5hfxDMfAKCQYIOTo8KxQzTxTRs0d3oZ
-WGtjDIhGBBMRAgAGBQI+0lYEAAoJELfOmxk3oYfG2mwAn3kHvxzSk0D5lMqNZgXp
-7Jyre55TAKCM7rGuZkGDBQqmSjD6Jy4R5ROlx4hGBBIRAgAGBQI+0ps7AAoJECIY
-yB6OfAP/BX8An2tSZCbECPN+FASn2rnk72jJUs5cAJ0dWOYiWejA7SxzraRHhd5v
-93f+cYhGBBMRAgAGBQI+06CzAAoJEC4s9nt3lqYLQUkAmwdI44BT8H4e7kfye6aF
-YxZQEr69AJ9CSuwxIGYyeIrf68Mw6c53mLYJDYhGBBMRAgAGBQI+1BM2AAoJEElF
-pTfXe0P7kPAAn1MWJTsdIM+oZigKRjrFwobQJgRWAJ0RLzdL+pRvODFWYWYNXoHy
-wpKtkYhGBBIRAgAGBQI+1Y0dAAoJEFC7KXQtWafSaDYAnirOVQmrJIZuWeXeCZyA
-KCmURM00AJ9xQYalUNQy7jZzArnRmBNQEZVEEohGBBMRAgAGBQI+1mToAAoJEG4D
-j17go4N3kkoAn1XAphscfAUI1zdktzMPnJMtRm+PAJ4pcxc4HBtTCIYdAzuwTCJB
-h38oMYhGBBMRAgAGBQI+1rTtAAoJECTxPj/mjACSC+0AoIFMtt50NodMs2F/il/q
-v39uJ0wQAKCa6cgc6ONUj+Xb7LSu2m3MdRH4S4hGBBMRAgAGBQI+2BZvAAoJEFlR
-J0yBj+NA+V8AoJobejGAU5yXULIoZ4BIktZYHbl3AJ4t0+11pa4a83y8kRG5MDwT
-qZ4zVohGBBMRAgAGBQI+2UvlAAoJEFHGMyB5fcdfTD4AnRY5+6rtflNV4Es1whch
-pF1w3BZ+AJ99pF2FwbtxfEme+w0Xp2VamWiy0ohGBBARAgAGBQI+4k4QAAoJEHFz
-fab4xNFPt3oAoNP+ftsvBBVJ1RJH5M3uIxUEFYhtAKCWqgNklcahdhvgiQgIsWkU
-w0q2ZohGBBMRAgAGBQI/ASQtAAoJEDC3rnBH3fqFtKEAnAsAOgaqeWj0iRuzXxff
-vJpP6RfQAJ9DhZwVvoMSMwUaZJfrTuaEQKVuPohGBBMRAgAGBQI/ATGxAAoJEF1s
-/WZ+hdAzbMAAmwYtvEwVTRjAldEOM7JHIOdaqsgOAJ4+E3tls9u2TlfYFSr+ARta
-APHUUohVBBMRAgAVBQI37skLAwsKAwMVAwIDFgIBAheAAAoJEFI0hF3yuSD1ZGsA
-oPV21vBrOrCliTE5Y5EjVisHcCB0AJ9emNkH8gKnTByxl90RXwZGhY2OaYjcBBMB
-AgAGBQJAGjk1AAoJEMKjXUokOhMpq80GALIytSv0VaVUp0x5zN6O/vh3+xZyrOgR
-XZoiBqd6zbk04T9Uk3bm0zMuVOE08Ly6APnenLpPsAnzV4CXTJqjxnrCGWJXUsM/
-iRU6WOHPZlaetKx0GiDaochFSoHmwRMfe+hqG1kfob1c75BlU7ZGSL4fJKox1d4P
-rLJBjSc5aFqWpw0Hrc8SazWY9YsRE1xzSnmRudENoH7B3hrTDmSfI0+2cneSDYZF
-AySnMY+RgdoB+jlI4I2WnkikeMFS2kJVRYhGBBMRAgAGBQJAGjl3AAoJEAvgKygR
-ZSHZh70AoJMe34vDaRd3HCyRxCPEkphAjcrNAJ9jkjXECVyaRlVssVjrHkIe8TVL
-E4hGBBIRAgAGBQJCn1n6AAoJEFjalq8LkBFBTEgAnjcMkeddsofZs4kp1kQ1XCF0
-6t0GAKDaftC5+JCDpbHxRlAg7592Sgf6BYhGBBMRAgAGBQJC/XlrAAoJEDxjyj+g
-s+iLHrIAoIBDjoU8pG5ujtIfDt3dzKlsnQwlAKCGnJ75fT1EvX40NGC+a2MV12im
-8YhGBBARAgAGBQJDgyajAAoJEL3QUagWtvhNw4oAn3i4Ol1cB1rU0UlQ6Kpp+BvL
-mbl3AKCxRaCNL5rSDwza58IcHgCYA1JuqohGBBMRAgAGBQJEpBSrAAoJEGzbQ2xy
-BIUlGjAAnA6q/bEbuTQ40cRkciok6qIrjRi4AKC1gs03FjU7swTpn0D/sExRHato
-cohGBBARAgAGBQJFsAxiAAoJEKGZsaFSUbRipk4An1HNnkfHT2MdUehP5WI9UHNj
-RbSZAJ9xA6kYqZbyA0I+7KvWXsp+3/NLAYhGBBARAgAGBQJE53DCAAoJEKfIw7L3
-Ht8cmX8An3ZydeyE6WQmGo6A5Ygdy77sEDrdAJ9be1EXSo3DGHU39KEqiqDqJdKl
-FYhGBBARAgAGBQJHHNOyAAoJEMckhjdNIInb/LcAn1nDAxIMwjDGogSmuO29Ftpy
-M43KAJ0ZwNI9nE8ZVnfoRVqV+4wf+63HCIhGBBARAgAGBQJHHNP9AAoJEHadk3Jo
-xR84gKAAn0jz3i1j3G0MmxWO/2NPV1UgoBlDAJ94JwiRN88WpSIg8Ry7QsLg93YE
-iYhGBBARAgAGBQJHHOmdAAoJENoowjp5/0R0P5AAnj9bhzFVuLfLTYAcj5TZeBTD
-GRGcAKDGguy7ik3X8qwIgZaRYen082RfOYhGBBARAgAGBQJHIeYTAAoJED/eqnpP
-6wOh9poAnjh3qNWJ0Ee6levhCtxg9dmH33vYAJ9lgem1Xl1wmaicH6/4cvCxONoB
-H4hGBBMRAgAGBQJHIetpAAoJEE+rSctElUDySjAAoNE9ETLlZcO9aHv9+QzeKLmI
-nd9ZAKCmHzDfEDup2JwV33In5AUjywtQeNHQGNAWARAAAQEAAAAAAAAAAAAAAAD/
-2P/gABBKRklGAAEBAgBIAEgAAP/bAEMACAYGBwYFCAcHBwkJCAoMFA0MCwsMGRIT
-DxQdGh8eHRocHCAkLicgIiwjHBwoNyksMDE0NDQfJzk9ODI8LjM0Mv/bAEMBCQkJ
-DAsMGA0NGDIhHCEyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy
-MjIyMjIyMjIyMjIyMv/AABEIAJQAkQMBEQACEQEDEQH/xAAcAAABBAMBAAAAAAAA
-AAAAAAAAAQMGBwIEBQj/xABAEAABAwMBBQUFBAgFBQAAAAABAAIDBAURBhIhMUFR
-BxMiYXEyQoGRoQgUgrEVFyNEUmLB0RYzkqLwJFNysuH/xAAaAQEAAwEBAQAAAAAA
-AAAAAAAAAQIDBAUG/8QALBEBAAICAQQBAgYBBQAAAAAAAAECAxEhBBIxQVEFkRMi
-MkKBoXEUI0NT0f/aAAwDAQACEQMRAD8Av1AIBAIBBDdVdqGldIl8NbcBPWN/dKUd
-5ID0PJvxIQVBfvtE3mqc6Ox2umoYuAlqD30nrjc0fVBAbn2k6zvIcyr1DWua/jHC
-4RtPwYAgjhpquVxe6CdznbySwklV7q/LSMOSfFZ+zF9NURgl8ErR1LCFMWifElsW
-Svms/Z1bRq/UVhAba71W0rB7kcp2f9J3KWac2Xt81fbnNbcPul0i599EI348nMwP
-mCgtbTPbrpW+OZBcHSWeqduxUkOiJ8pBw/EAgs2KaOeJksUjZI3jaa9jgQ4dQRxQ
-ZoBAIBAIBAIBBytQajtWl7VJcbvVspqdm4Z3ue7+Fo4k+QQebdc9tt71K6Wjs7pL
-Vazuwx2JpR/M4eyPJvxJQVaSSSSck8UHQtttNY8vkyIR094ql79rp6fp5yzz4SGn
-p4aUBsMTWjmeZ+K5rTNvL18eOmLisOpEQ9uea5rcS9vBq9Tcu52CrV8OfNGralyq
-6zwVYL2tEUp95o3H1C3plmrzOo6GmXmvEotNC+nmfFIMPacFdcTExuHg3pNLTW3m
-GClVK9H9omodFVDf0bVl9GTmSiny6J3XA90+Yx8UHpnQfaZZddU4ZTu+63Njcy0M
-rhtDqWn3m+Y3jmAgmqAQCAQCAQRvWmtLZoixuuNwdtSOy2npmnxzv6DoOp5fIIPJ
-GrdX3bWd5fcbrPtYyIYW7o4W/wALR+Z4nmg4KDKNneStYPeOEkTGnjbDTsY0AYC5
-Lczt72GsY8cRHk+xhO88TyVJs6qUmW7BHsNyea5slty9jpcXbXcspog9qiltSt1G
-Gt6tJwcx29dEal5FotjnUuBqGAfsqhuM52HfmP6rpwz6eR9SpG4vDhLd5YQPUdZU
-2+siq6OeSCohcHxyxu2XNI5goPT/AGU9rEWr4m2i7uZDfI2+Fw8Lapo4lo5OHNvx
-HMALUQCAQCDnX290OnLLVXa4y93S0zNt55no0dSTgAdSg8caz1fcNa6hmulc4tb7
-MEAOWwx53NH5k8ygjyAQbNA3NYzyBKifCa+UhdcYoW4AMj8cOQ+Kw/CmXqz1tMcc
-RuTButU8nZLWD+Uf3VowU9sLfUs8/pnRP0lWH95k+an8HH8KT9R6r/sn7lbc60fv
-Lz64KTgx/C0fU+rj/kk428zNcBMxkjeoGCqT09f2t6fVsu/9yIn+jdxmirLXKWEg
-sw7ZPEb0rWa25RnzUzYZmvpHF0PLCAQO01TPRVUVVTSvhnheHxyMOHNcDkEHqg9b
-9lnaDHrnT/8A1BYy70gDKuMbtrpI0dD9DkdEE9QCAQeZO3bXDr1qAacopc0Ftf8A
-ttk7pJ+fwaN3qXIKhQHPHNBuR2uslAIhLQebyG/mqTkrDevTZbcxDdpLRURSOdI6
-MeHAw4FVnLDWvRZfevvDbZa8+08f6h/dROaGlegtPmf7j/1vU1BE3cI2vPplZWyT
-Ltw9Jjr5iJ+zdFvnLfDSPAPAiM/2Vdy3mlPHEfZmLBVTMLhRTOHM7CmL2ZWw4J8z
-DTqdPuafHFJGfNuFeMlvhzZOm6f1fX8OXLbWMc6I1cYJGC1wLThafiT7q5v9NSJ/
-Llj+2g+x1LQSySCT0fg/VIyx8InoskeJif5aE1PNTv2Zo3MPmOK0i0T4c16WpOrR
-o2pUCDv6M1VVaN1RSXily5sZ2Z4gf82I+03+o6EAoPaNvr6a6W6mr6OUS01RG2WJ
-495rhkFBsoI1r7UzdJaLuN2BHfxx7FO0+9K7c31wTn0BQeLZJHzSvkkeXyPJc5zj
-kuJ4koMUG9bJ4IJy6UYcfZeeX9lS8TMNcV4pbcu53zdnaLhg8+qy7XfGaut7aFbK
-HuDQQcLWkacfUXi06g9Q0Yqi+Paawk4D3+z1xu/5wUzOmERtO9Jadr6WFsr7lSUM
-chDyyUbRIVJmsy2rF6xwmMFbPCX99X0lVTMPgzHlpGQN3Q7xgZ+KzmYbU7v5N1lT
-TPbVONy7hhkMOGtxs7geAPHJ3b/NK6lOSbeN60iNxtltqzil1BJUT4z3b3Y5bsZV
-5nXpjEd37kKvED4XRmV7TLjDmjORjqr0ZX3vk3SVAc3Yc7fyzzVb19w6+nzbjttJ
-u51ULaZ8DiHPI3N6Hr5KKVne058te2auCtnAEAg9H/Z61Ua6yVmm6iTMtAe+p8nf
-3Tj4h8Hf+6C6kHn77R1+Lp7Rp+N/ha11ZM3PEnLGfk/5oKHQCAQdGg23UsjBjG2M
-DO/hvwqz5T6PxU0lTII4WF8jjgNG8lTvRETM6hItKxZnLSxrneIeLgD1WWWW2GNz
-pYVFaKEziSagFW/G90gc4cOQG4LGLzHh0zhif1M6y37VyorfFTtgZLI2Usa3GWjm
-efzUWmU1rWOW3LbxSX2elexmKhwezbwQTjHPdnh8goiZ8JmIn8x+4WhlQP2tO0OZ
-yjp9k7vgrTMoitdcQrPWtsfDURuLi/acGtdjGcrXFb0589JjUodcSbbc56WPDzA8
-s2ndQtYnuhhMdttOYSSSTxJyrKkQCAQTHstvx092i2iqc/Zgml+6zdNiTw7/AEJa
-fgg9j4d5IPIHbBcTcu1G8u2ssp3tpmDoGNAP+7aQQZAIBA5T1ElLOyaF2y9vA/mo
-mNxqUxOklsFXCLzSVFOXNeHYfGeROQMHgR6qlt61K+OYi8SlVmtgodQVFMXtc3ay
-C0bhnfhYXtuIdOKvbeVqRV1NZ7ST3bS93gYT16qK8Q0vzO5nhHI56ua9x1NFsvc4
-4LpBx8geQSI5LW3HDavclznrIpqxsLGln+S0jP8A8S8fKMdp8xDftOon1EBop2Fz
-2jwl3HHRNzpMREzuOET1QykfNC+sIbTwy944k4zgE4+JSu4mdIyTE62pe4TfeLlV
-TZJ7yVzsnzJXXEahwWnczLWUoCAQCBWucxwe0lrmnII5EIPQ368D5IKDute+6Xes
-uEudupmfM7PVxyg1EAgEAgfo5nwVTHMaH5IBYTgO8s8lEkLG0fVPmvUkNRI572gN
-Bc7O4csrDJWNbh0YrTvlbE9virLWHv8Aai47O44O7PqqdvG4X7+dSj9vpqO33FkN
-ztslU3bGxO2Vxa9vmOAPluUbhrFcn7ZdbUTaPumxUWmoJJZG47yck4HXA/qRwUzM
-R6RWl5jmxLBaIKARZO1NHE5z3dXHkPIKK8ztW89vCDdotSxkLYtpu3I/aG0MgbIz
-v8sq9I5ZZLcaU+5xe9z3cXHJXS5yIBAIBAIF2j1KBEAgEAgEBwORxQSrT91fNeIJ
-iGsnzsyOB9sn3sct/HCzvH5V6Tyuu23MiiL35cdnD2nmsYlpb5N01bs1DHsDntcf
-Ds7vn5KkcS3rPdGpdm5CpZTfeJojI3GQGu3hWtMyUise3Bprq5ksrS4EluXO6Doo
-jiGd53Koe0C4z1V9DC5wi7oEDPHJPH5Lox+NsL+URWigQCAQCAQCDq6mtf6F1Tdb
-ZjApaqSIegO76IOUgEAgEAg72k7fW111c+jp3Ssp2CWZzeEbQ4byqZP0ytXzC6I4
-H05bJGHFj2glq462dNqt2lpGF4lp3bDwc42eBWkSpzDpVslZUQGOSo2WkYOyOKmb
-DguoNpzooWHB3vceJWdrLVqqztAtdS26mtZBI6mbG1j5Gty1hycAnllb9PbddM88
-attC1uxCAQCAQCB/7nP/ANt3yQT7tvtbrb2n10uyWx10cdSz4t2Xf7mlBXSAQCDt
-2fSGoL7MI7faqmTIz3jmbDB+J2Ao2Lc0p2ERR1X3i/1batjACKeAOawu/mJ3keQw
-guGn07bI6E0sVFDFE6LunNjYG+Hpu9AkxExqSJ1y5Z01JSAtDO+h4DHHC5ZwzXx4
-dEZYnyajsMLXZa5zD0KiKJmzYdZIjve44HTmpmiO5p1VFHDC5kLMDqeaztHw0rPy
-SzaJp5zUVN0p2yxzxGIQSDcWu45H/Oq2wYpj80ssuSJ4hXesewRjA2fS1SQ7J26W
-rk3Y5bL8fQ/NdDBT970xe9OTCK7W6emJGQ5wyw+jhkfVTsclAIBA9SUstbWQUkDd
-qWeRsTB1c44H1KD1t+qizdB8kEP+0Vp81Nktt/iZl1HIaeYj+B+9pPkHDH4kHnml
-paiuqo6alhkmnkOyyONpc5x8ggubR/YS+pDanUtQ5gOC2lpnb/xOx9B81Gxalr7M
-tI2pzX09hpDI3GHzNMrt3PxE70Eo+6xhuAweSaDzYwxuAFIzaMHcgcH1RBHRsf7T
-QfUJpLA0sJ9wKO2E90kbSwMdtCJuRzxlRFYj0TMsycKyDTgHcQgZlpIJ43RyxMex
-24tc0EH1BQV/qbsZ0xfhLLT05ttY4ZEtLubnzZ7J+GFAqXUHYdqe0NMtvMN1iHEQ
-+CQfhcd/wJTYraop56SofT1MMkM0Zw+ORpa5p6EHgpFidiGmzfe0GCskZmltbfvT
-yRu2+EY9drf+FB6vx5lBzNR2WDUWna+0VIHdVcLo8/wn3XeoOD8EEU0V2f2zSlBF
-HDTxurNn9tUuYNt7ue/kM8lEQJpDGA3KkOgbkCYy70QIUChBkgMnqgNooELigxKB
-MIDCAwgQtB3EII3qvQ9k1hQOprlTDvMHu6lgAliPUH+h3FRoNdm2gotBWGajMzKm
-rqJ3SzTtbjaA3MGPJv1JUiZoBA06MBxeOiDBgxCOpCDInAQAGAgCEBhAIBAYQGEC
-YQVbq/tTq7bfJLRYqGOUwS9zPW1DXOjY/m1oGM44ZJ47lS14q2w9PfLMajiZ1v00
-qDtUvlI8S3e209XR7y59Ix8MrAOJ2XEg+m71WdeorM6l25vpWalO+vMf4mJ+0rQt
-N3ob5bIbjbqhs9LMMte36gjkRzC3eY3UGcbcnJ4BA8gEAgCMoGi3ZwOQQA370QES
-EAgEAgEQEBwIRKm/1YX7/E10kmqKZ1qrZ5pGt3yZLiS3abu2RyJGcbtyyyY4vz4l
-2dJ1lun3WY3WfMS4kthq7DPPTVVNWRxtxjvWOfEP/CTGC0+vyXHlx39x9n0PQ9Z0
-upit5iJ9W9f4n4dLsor2WnVdRY4KrvqSuhdO2PIPdys9Orc/ILrw3taPzQ8H6h0+
-LDkiMVtxP9LrY3bWzgPAYGECoBAIBAcUGDgRw4IMUQEAiQgVEEQKgRAIlhKxzmHY
-IbIM7Djkhp64zv8AREItYuzexWG9i80v3p9cWyB8ksuQ5z/ads4wDx4bt6hKYABo
-wFIVAIBAIBAIBBiW5QYkEIEQCARBUAgEBglBkGolkgEAgEAgEAgEAgEAgTAKDEgI
-ERAQKBlEssAIFQCAQCAQCAQf/9mIXAQTEQIAHAUCPtZSiAIbAwQLBwMCAxUCAwMW
-AgECHgECF4AACgkQUjSEXfK5IPVx7wCgsHNOiaRzZM57EddeoV2Fueb7gaYAoKXG
-MLRjACqtxgFkE9j6byYSvEzoiEUEExECAAYFAj7bwVoACgkQxcDFxyGNGNfQugCY
-3Jkyvvvd5D4AFAnLbv7S5a1JoQCeJDRBCSZ6nwh50r2fTst/TfPg7ZeIRgQQEQIA
-BgUCRKQf6AAKCRBTn4yvDOJxHeYrAKCxd8QkhjxfssEC8CtgFtkaTRagugCaAr0b
-sh7Q/G4DJdpyYfrc/Bh1sPWIRgQQEQIABgUCROdwwgAKCRCnyMOy9x7fHM+qAJ96
-TkW12DDfpp2Vh7yQ7XAOLWSd9ACeIgXoB6QJfnkr50NWGPGcWyptyX6IRgQSEQIA
-BgUCQp9Z+gAKCRBY2pavC5ARQejZAKCQwnUq/PSlt/1ZJrUlRs9CHmBoRwCfU9K6
-TDpjXqKXjuJ70/FUMgYV+pGIRgQTEQIABgUCQv15awAKCRA8Y8o/oLPoi/b6AJsF
-+3gUpX/jkVONSz1k202w/e1lKACgv6l3HYZAYdqSAm3tQ3N/KiHwYtqIRgQQEQIA
-BgUCRxzTsgAKCRDHJIY3TSCJ29lYAKCNM+svkCWVQAHzZIO4JP6jPux/gQCggYKW
-M111Wnji5PCbp5O3YJvmYxGIRgQQEQIABgUCRxzT/QAKCRB2nZNyaMUfOAkzAKCt
-6d14E3GfpOMKbGuA4mit5aH8hACggGhclDW4duBlUY4TMlIQpIfoe7aIRgQQEQIA
-BgUCRxzpnQAKCRDaKMI6ef9EdJc7AKCjJ1pCHDqThrzhlHq5JsSgbWIo7wCgvrkP
-9uelD8Kx3pQl83slE1jY6a2IRgQQEQIABgUCRyHmEwAKCRA/3qp6T+sDoVZhAKCz
-t0W8eD6xuh1UmbgHfaaiy9XfSQCg2qrusKGvq6973KRYc9tHDnvvyG2IRgQTEQIA
-BgUCRyHraQAKCRBPq0nLRJVA8vsXAJ909KYRuYPvKv7TWze7XJxOkitKswCdGxol
-DZI/f924xj02V45jBhDdybK0I0FsZXhhbmRyZSBPbGl2YSA8bHhvbGl2YUBnbWFp
-bC5jb20+iGAEExECACAFAkOCOLkCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK
-CRBSNIRd8rkg9ZT3AKDzsUWctob+fKQ8epOUxRzNeCOGjwCg+TOdj9fcp+VEpaMh
-l044qeyWXrGIRgQQEQIABgUCROdwwgAKCRCnyMOy9x7fHAbgAJ0UVN4b20yLsUAO
-mSbICRoD+c2QBACghJrI2BRfTTD4v28CpO+KHMAfgZSIRgQQEQIABgUCRxzTsgAK
-CRDHJIY3TSCJ2+RjAJ42XXIe1LxDowqB6wpLmX6GT4gMWACeMEy+44uiv5g/h3zK
-ROAVXf7b8leIRgQQEQIABgUCRxzT/QAKCRB2nZNyaMUfOBWyAJ9XAxl3FnpXsE0/
-YRbZH8f5/smrxACggGismT89HTLIpCYGeivIKfpoClOIRgQQEQIABgUCRxzpnQAK
-CRDaKMI6ef9EdOxuAJoCln60Mn8Q9BAvLCZYkW6U/JSdFwCgrLDIAFFNAyr9SNyk
-QJuMzjE4R/KIRgQQEQIABgUCRyHmEwAKCRA/3qp6T+sDodvDAKC6YPVppldlSpGH
-E81PmVM9dWv/3gCggMnbO8Of530HZLTV1iJ7Xp6ss3+IRgQTEQIABgUCRyHraQAK
-CRBPq0nLRJVA8taMAKDGkUQ+OePSdZa7pXRmkTCk5ECtnQCgiOwLtY8op8GeKuOY
-+C4U9fqqDSK0I0FsZXhhbmRyZSBPbGl2YSA8bHhvbGl2YUBmc2ZsYS5vcmc+iGAE
-ExECACAFAkQa510CGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRBSNIRd8rkg
-9YU1AKCnMkypeazrf4l8wlGoP6MAOEIySwCfbqKEj4B2vqTshawNg2WQgtJagcmI
-RgQTEQIABgUCRKQUrAAKCRBs20NscgSFJTvXAJ91oqhxXts1Tlr8iyVS7S8YHIN/
-dwCgtuJ16XYK3hLJvckRjjA2n8uhA8eIRgQQEQIABgUCRbAMYgAKCRChmbGhUlG0
-Ypj9AJ4uKxaXLVCWlyfc6z07xMFTOCnkTQCfU7N7SKX9c7UkKFSOdhReIJk+Iv+I
-RgQQEQIABgUCROdwwgAKCRCnyMOy9x7fHFq5AJ9dJE7CgM/w6edKxnwwRBthzgWb
-6gCcDEActChytfUOJ/t4FNqDu54dNF6IRgQQEQIABgUCRxzTsgAKCRDHJIY3TSCJ
-2wvDAJ9juFOqcqU3VQMTHRQESF+wrsWH5ACeLPWkQm5pMISSbyR3o6EpYlOVKs+I
-RgQQEQIABgUCRxzT/QAKCRB2nZNyaMUfOGBvAJ0Q0m2XAaICdOEiZvYtMCMeQbzA
-7QCgkJbrNN+ug2iktCbtcpj2XPN4qyWIRgQQEQIABgUCRxzpnQAKCRDaKMI6ef9E
-dJFfAKDGqTZx3/HeyGmGasG8lD89AUaPJwCeIvYFZMVVLDrKxOL6W7K+IU52uBaI
-RgQQEQIABgUCRyHmEwAKCRA/3qp6T+sDoRP/AJ4jSyeLjW7n7NsnE5MxTc4BSZD4
-jwCgy8pdwzzbRgJHxTpcyTQUrbbZ9WyIRgQTEQIABgUCRyHraQAKCRBPq0nLRJVA
-8iKCAJ9JIK8pRrA35BQQwZEekfrFpIRZQQCgsjBLMf3yHZa4iSIw6kk9Y9iONtK0
-H0FsZXhhbmRyZSBPbGl2YSA8b2xpdmFAZ251Lm9yZz6IYAQTEQIAIAUCRBrnigIb
-AwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEFI0hF3yuSD1akMAoJ5bu6pfwaID
-NwER40e63rPDmGAjAKDsPOE1PBTtCxLWyXU0ClYcCcnA/YhGBBMRAgAGBQJEpBSr
-AAoJEGzbQ2xyBIUlV6cAn31OimaZJZ1vbjBHa74U7ishrMeQAKCIz9BbICSyV7/L
-nEW4UH1KzOQUbIhGBBMRAgAGBQJEpBSqAAoJEGzbQ2xyBIUlB2sAoNP9nOkBM/b1
-+uJFf/hETZa2DQ51AKC7jhlY2dmvKgrTRA4+32hqKjeNmohGBBARAgAGBQJFsAxd
-AAoJEKGZsaFSUbRiIlcAn3SCdkGLaVe8IRIZeYDpFn0+B2s2AJ0bBaZLnP4AiCZv
-zZRW8vPE8HpVWIhGBBARAgAGBQJFsAxiAAoJEKGZsaFSUbRi6hsAnR218E16AafJ
-jbvCu5LnWWeLSsfcAJ9Sj/wtBxxdK+ULwDbQS6FejAtEFohGBBARAgAGBQJE53DC
-AAoJEKfIw7L3Ht8c/dMAoInknun/UQAdk0ODS3J6c07C9GUxAJ9M4VLB6IFssC0I
-Hk5DQBkUZQhG44hGBBARAgAGBQJHHNOyAAoJEMckhjdNIInb2W4AoI03SlFH2Hzx
-xZbspwaXUES3r9tsAJ91hi4/6MtrjyqU8+ol6PTmRdqPi4hGBBARAgAGBQJHHNP9
-AAoJEHadk3JoxR84LGAAnj7mixIDqoSaDeGdIPYGOQi7D6YcAJ92FDVQ81S4BUTm
-nfpqwQxLzysYAYhGBBARAgAGBQJHHOmdAAoJENoowjp5/0R09vMAni7ALYoAdYKM
-lNXWLw0hC3nScSEoAJ93o9M9HuFdfpi9G3FfZHTkm4TiOohGBBARAgAGBQJHIeYT
-AAoJED/eqnpP6wOhya4AnjPC68zy7ToBDB03XcdhiiTLcjULAJ0QsVNxG8i5TBNa
-g6cL0Pp4zlBBcohGBBMRAgAGBQJHIetpAAoJEE+rSctElUDya3cAn1ezpVp7ksH+
-V2BDErgxfXpUH4ydAKCFSB4mTqB0LCnMIsUEHEPCwnt6drQmQWxleGFuZHJlIE9s
-aXZhIDxhb2xpdmFAbXBjbmV0LmNvbS5icj6ISQQwEQIACQUCRBrwegIdIAAKCRBS
-NIRd8rkg9Y8GAJoDmkVmtrrSlwzW/JjqijGvUWnAaQCdELWSXbx+dOdRzOHuERQZ
-sCHEqVOIRgQQEQIABgUCPuJOEAAKCRBxc32m+MTRT53JAKChK/islX9K0ka3vF8X
-ZXU9jTh3YwCguasziZ5SIAfaH0OCuTkmgaFXC+WIRgQSEQIABgUCPs5rvwAKCRBV
-lt0M6b9lPX+fAJ4otEll4qls5epV4Eot4v530spzQQCeK/ykxir+ZuxqxDINUOb4
-ofjAlA2IRgQSEQIABgUCPtKbOwAKCRAiGMgejnwD/3yhAJwNY4MmwKcXm2QWQl7s
-DldHRY7tKQCeMhf7lpvohCN4/nMKfcGaGZu1mUmIRgQSEQIABgUCPtWNHQAKCRBQ
-uyl0LVmn0ippAJ9lLY3s9rV+44Be1I18qdNSFBf8ZACZAfyvFrUfeVWx2MEB2my9
-EQqOuEOIRgQTEQIABgUCPtE3XAAKCRBF5ZBo+Ru3nzK2AKDLaIzkpDhiRT4NYI9x
-WfHkdOBACwCfdmFvuqzSr6yx0rGjhcAwfsIroeuIRgQTEQIABgUCPtF4zAAKCRA4
-mlY8wnKhJiSkAJwNYneXwjvKWjBN4PR6aasrjj4iKACfY3+4NNEi9dZzrfSvGgWI
-RHJrkBuIRgQTEQIABgUCPtJWBAAKCRC3zpsZN6GHxkkvAJ9UitrO+K51AOyHxNPo
-pYStC9QZCwCfZtmL4SxszJSfWF61/SD3eQiQOgqIRgQTEQIABgUCPtOgswAKCRAu
-LPZ7d5amC/8EAKCvpMXojCAQ66DY3unI9yXrbanX9gCgxwQNMU8VIgfWfApdytF+
-oSF2FsmIRgQTEQIABgUCPtQTNgAKCRBJRaU313tD+1hoAJ4mTHXuBkpNYRut/4C5
-ghPdtQmcrACfW5mbgfVQUmkB1rja9uu1SEJ2yaOIRgQTEQIABgUCPtZk6AAKCRBu
-A49e4KODd49OAJ4ufLRQElpvcnmu9GJ8IbyNpzLEpwCfbJHMuVOn4ZPZMU02sl/b
-dojZFAqIRgQTEQIABgUCPta07QAKCRAk8T4/5owAkqiAAJ9qlhWmhk9ZMWmoZXPz
-qddWEQeIfQCg9HG2K4+ufkKZ7mqZxPDHLsJbxn6IRgQTEQIABgUCPtgWbwAKCRBZ
-USdMgY/jQE9AAKCmDfWFHjxauxsRoxlKoufTB6mfmACgkSVtCEP6IXpNWWfD7pKv
-/piEvZWIRgQTEQIABgUCPtlL5QAKCRBRxjMgeX3HX1r6AKCXCdgLIqPlwCfAop7K
-gCnlfLcOjwCg16RYrcuovNnOukVlk6hP1jZ1V3iIRgQTEQIABgUCPwEkLQAKCRAw
-t65wR936hXu9AJwNa6Rdd3MwxVzZeogtXWgOLsFUDwCeKCwG+CmZRH1sjY0s+9ky
-G+0Skx6IRgQTEQIABgUCPwExsQAKCRBdbP1mfoXQM++9AJsFJYyspt8dAQooGPAo
-dLeviDSmWgCcCVq1G1yOoA4thYA0fnHwN3QotIeIXAQTEQIAHAIbAwIeAQIXgAQL
-BwMCAxUCAwMWAgEFAj660GAACgkQUjSEXfK5IPXdHgCgvaapjNit3I5dux5z7Z5+
-HzGKvnsAn11JahGgwLrx2oYcnVJ9AXLdWWJ1iFwEExECABwFAj660F8CGwMECwcD
-AgMVAgMDFgIBAh4BAheAAAoJEFI0hF3yuSD10rYAoPj6+3K7NhOY/PFGibV06bvv
-pJzXAJ9fFz9s5nNPZ59eOt6RByKh4aWTE4jcBBMBAgAGBQJAGjk1AAoJEMKjXUok
-OhMpIPYGALZOGKpQWUaTywSyPJ5uHJqMflFnkKSy3A+++3HwDpUrUK6UlLzNL+AK
-5BzEbso71zdkkP8J/4nhHfqgFKLh0ezwZXPEpZABMT5Yvolm0GbjauMEwYPQkLEb
-d1Ae/ODPUUWlsNHc2PV++o46ECOzj1fpKR5cW7gFIJ1uhwvqL6AaeIlbQ9d8YMyL
-jQAyE+EX+uBLrkNlswm4aVJd6A9pi2H0SAXAMzYz8B/w2mD6TUNp9IHHO7WTstZ8
-hq6ZKXlDoohGBBMRAgAGBQJAGjl3AAoJEAvgKygRZSHZZYIAoIvNma1q1npAhWqA
-NA7kqae6g4dGAKCtpLvhTRJ2bjC0KCu+YOv0g0AG9IhGBBIRAgAGBQJCn1n6AAoJ
-EFjalq8LkBFBR9kAnRcjAn0EgF2n1O2a8fCmvZuxQI1CAKDOl5mKasdNQ07GyNtg
-ZZ13qO4/P4hGBBMRAgAGBQJC/XlrAAoJEDxjyj+gs+iLSUkAoLriSKhT9/4Syweo
-b6ZBBRYrWAsRAJ9FMkdsr7feuRR8md4Uqr/arFfsDohGBBARAgAGBQJDgyajAAoJ
-EL3QUagWtvhNNlQAn1AfAL8OWwWqrXNUqFaB3yG+zx1WAKC7tpA63saeOIvNnb7I
-cAlXqkPiILQgQWxleGFuZHJlIE9saXZhIDxhb2xpdmFAYWNtLm9yZz6IYAQTEQIA
-IAUCRBrvlAIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEFI0hF3yuSD1SY8A
-oJ8698ppRhlkXQBWTaYjyq6iTjhBAKDIgFMSer2ykHA+EbVoPRBgEMwl4YhJBDAR
-AgAJBQJEGvB6Ah0gAAoJEFI0hF3yuSD1jwYAmgOaRWa2utKXDNb8mOqKMa9RacBp
-AJ0QtZJdvH5051HM4e4RFBmwIcSpU4hGBBMRAgAGBQJEpBSsAAoJEGzbQ2xyBIUl
-bbkAnizBAkYVmNUh6UCYhnQOpqxIP3slAJ0dM6Ti+ZHB8a8Oji2HRkRfFM3/UYhG
-BBARAgAGBQJFsAxiAAoJEKGZsaFSUbRi1e0AnjkBdktDgmHOXE598RImBDBUJiPV
-AKCcbLDelA5ZBGIQZVmAHSqVHYMh04hGBBARAgAGBQJE53DCAAoJEKfIw7L3Ht8c
-zlYAoIPLRni0APdwdUco81fJV5uwHc2AAJ9ilwHBkQxnMGQIgCFk3cOiTcjrI4hG
-BBARAgAGBQJHHNOyAAoJEMckhjdNIInbwUEAninx+OkD8JvNDK93xQS6zk7mFmSf
-AJ0QhSrSUDUQElR9K+REEi9RKYMkJohGBBARAgAGBQJHHNP9AAoJEHadk3JoxR84
-Yp0AoInlyDHTXF5jK+7+1qm/4IZoiNHzAJ4/XYv7Q7W7jlBqlggafL5kE3jM3ohG
-BBARAgAGBQJHHOmdAAoJENoowjp5/0R0AdsAnjQ0aDb0R7Dm+3FT8+Tpz55GKi6k
-AJ9ZdFA380MzOAiiqyhg6iPytArbEYhGBBARAgAGBQJHIeYTAAoJED/eqnpP6wOh
-BGgAoJo2OKiN+Y6g2pHXlgQURZf16+HSAJ9KxvwWHzmTjc4By99H+EzF4ft9oIhG
-BBMRAgAGBQJHIetpAAoJEE+rSctElUDy1GcAn2E0YQ075RvvwlRhbshacr16D9nE
-AJ9m6FlHNjugAoZF+cicoE3IHzLdf7QlQWxleGFuZHJlIE9saXZhIDxhb2xpdmFA
-Y29tcHV0ZXIub3JnPohgBBMRAgAgBQJEGu+4AhsDBgsJCAcDAgQVAggDBBYCAwEC
-HgECF4AACgkQUjSEXfK5IPWoDgCcC/UYWmAa+dLVyAgNuSj8hk35x7MAn0M8FdfP
-iCA1O+yNdG8We1cEvcG7iEYEExECAAYFAkSkFKwACgkQbNtDbHIEhSWiIgCg81MM
-Tcu6CX68n9iscKF+L5nCbQYAoMbEChbuOkiUVFXhZXrUazdN5XXGiEYEEBECAAYF
-AkWwDGIACgkQoZmxoVJRtGLraACdHY/uONLJxWiBuQXPgOLtBujkD78AnjNuCvNA
-3VlRHOyM4RiGSE2yKp51iEYEEBECAAYFAkTncMIACgkQp8jDsvce3xwx4gCgpXfL
-msK3kpU9VV1KPIRzAFX6LLMAn0UJA8ti3I78YQSA3OlYAxAt/b1iiEYEEBECAAYF
-Akcc07IACgkQxySGN00gidtxxgCeI/chiIyhlt8q1SQuKo+Bj6ShMQcAnjpLUjmD
-v6dUATvNBT+N31YxY/DaiEYEEBECAAYFAkcc0/0ACgkQdp2TcmjFHzh0MwCfS4h3
-cDVES4sITBXA3pIzRlx7+l4AnRfu3kYkyuAwFx74kFkdIzt6BlMJiEYEEBECAAYF
-Akcc6Z0ACgkQ2ijCOnn/RHRA6wCgglmdSd2qKD/w36JBGa/p4nJNAoQAn2XGI+OL
-7WwNqLdK3Sv3BnfSJIy8iEYEEBECAAYFAkch5hMACgkQP96qek/rA6FYQQCg593/
-Ry65COuBmSSfDYmJ17oRM5gAnRsLgZDxtu4kRcbPay/EMPCEtg2PiEYEExECAAYF
-Akch62kACgkQT6tJy0SVQPLYsgCfc48EVx8tMTMlF30tW9Ncyqp/PSkAnRxkM6J9
-0lVa9v+/VVI0fhxB8zhutCVBbGV4YW5kcmUgT2xpdmEgPG9saXZhQGljLnVuaWNh
-bXAuYnI+iGAEExECACAFAkQa784CGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK
-CRBSNIRd8rkg9XAJAKDx8kyIR5X/eSTIwL+t0ego6qiy8ACdHxgdT1Vlpp5GeOrr
-4KH3s7aQmXeIRgQTEQIABgUCRKQUrAAKCRBs20NscgSFJTe6AKDdJGOr2RLI0gHT
-BKv4pJB47RuRUQCg36MiEImBsAzA0Y4+p/ijbE9XQ2CIRgQQEQIABgUCRbAMYgAK
-CRChmbGhUlG0YrbzAJ0XWVZsROQYbqAmmnE5GkXNrFcR4ACdFPnOSoogQvP/b4J1
-IarQCZtGlRWIRgQQEQIABgUCROdwwgAKCRCnyMOy9x7fHK+XAJ9CXmSjAxVi3hBg
-UJtlGqwg6Cl4NwCgoWbC/tVuTpUS/+/fqd3hi8q4ES6IRgQQEQIABgUCRxzTsgAK
-CRDHJIY3TSCJ2xI8AJ9doBXm3c7j06IcxrNxI7XxS5J4UQCdEC/i1k7VCcAqDubo
-zx3t1lnRPW+IRgQQEQIABgUCRxzT/QAKCRB2nZNyaMUfOBxdAKCydS5uswQzWZ95
-AlBGcJ9ZdX2NYQCfSPeuCXRGY1ryJNzfWec2cfq7PluIRgQQEQIABgUCRxzpnQAK
-CRDaKMI6ef9EdNQoAJsHJX7alox28tLBtSkKokd3PNlvyACfeimh/PazB03IvnKt
-ptwQKaowFgyIRgQQEQIABgUCRyHmEwAKCRA/3qp6T+sDocTiAKCmp1Anb8MaWsH0
-HH15+31FmS2Q0wCfV5GxoOf1xP41AE/LQzyW7cs0NMyIRgQTEQIABgUCRyHraQAK
-CRBPq0nLRJVA8pURAKCQNwAoZgZ9ayHo6Jl4Vqc2R3o01QCfReCxa8/zHemmpVto
-LuhNdvb313q0I0FsZXhhbmRyZSBPbGl2YSA8b2xpdmFAZ3VhcmFuYS5vcmc+iGAE
-ExECACAFAkQa7/MCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRBSNIRd8rkg
-9UwaAJ9Cd2GWiqnp3o/GLNGQnJKbR2KnAgCg9M8dOpI6IhlHY1TkKZBH9tkw/SCI
-RgQTEQIABgUCRKQUrAAKCRBs20NscgSFJYjmAKDEpVsUlrr76gYOQ58qgnzB7vLD
-AwCgwc9+ST2LOOQusNFcgUezKEXwOB2IRgQQEQIABgUCRbAMYgAKCRChmbGhUlG0
-YpneAJ4u0JFJWtitR64oI9sKAHs+sozrEQCfdz636TUoiECfzar4a0TyZRV71C2I
-RgQQEQIABgUCROdwwgAKCRCnyMOy9x7fHE3pAKCV6wuH/YicDRlIxHPOziLrgtMF
-qQCdGlK4/o9llOjs5Xbwhyxt6EGkbTmIRgQQEQIABgUCRxzTsgAKCRDHJIY3TSCJ
-2yyaAJ4iLqIeN39tOQO/ERzzaMqxkJQoOwCeKVdkZKQNGYDfFJRhvk5JK+QkYaqI
-RgQQEQIABgUCRxzT/QAKCRB2nZNyaMUfOLooAJ9ijYlAJE9FelumxQYsUuOQF4er
-EQCfYwKA9yRovZyqYr/QdwOsG/yWCsqIRgQQEQIABgUCRxzpnQAKCRDaKMI6ef9E
-dH0mAJ9djNUYo39cw62LSWoOtMgKJvfb0gCgzbwIDLy8oP83jDeNS9XeNC7gHoKI
-RgQQEQIABgUCRyHmEwAKCRA/3qp6T+sDob/PAKCFnujsijjQ6fqe0FK3LC4ukCE+
-qACgsf+0xt+X0U8cFor93bNbLFC4YveIRgQTEQIABgUCRyHraQAKCRBPq0nLRJVA
-8mTvAJ97SzawuY5BM3YVPZbk8N/T2xIaXQCfTSho+NGdmWqdSeyIRe5/R6fiD3+0
-I0FsZXhhbmRyZSBPbGl2YSA8b2xpdmFAZ3VhcmFuYS5jb20+iGAEExECACAFAkQa
-8AwCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRBSNIRd8rkg9XpsAJ0YopoN
-21u9VSt5roOEY1bCdUC0MgCgmTTP7vg2ZnN1yPvH8s+5hKhKgwGIRgQTEQIABgUC
-RKQUrAAKCRBs20NscgSFJSXUAKD3pFaug4aSfQ2uyPQrmABHEzbv0QCg77VISd/v
-XGJ+l59aOFugclkSu+6IRgQQEQIABgUCRbAMYgAKCRChmbGhUlG0YrWFAJ4hIxle
-056A2kS9yzBZx1ul5Od4zACdEXty1e8aGCs3y5xvgJ4r7vCzzqGIRgQQEQIABgUC
-ROdwwgAKCRCnyMOy9x7fHGaaAJ45D5hU6dBkfR4VA2Fs7AJ5ADLu6QCcCCpGNGce
-jLPrCGZKjVBNCfg3yiWIRgQQEQIABgUCRxzTsgAKCRDHJIY3TSCJ2zoDAJ97ned/
-EjfSxSn0w+rLtDExVpcLaQCgivbHwBYyypLVVbludyyFGnrMxumIRgQQEQIABgUC
-RxzT/QAKCRB2nZNyaMUfOGwUAKCbiU5jDc6GDQl6lupR7pStRql3DgCdHBjeB+tb
-OSl0rfr78fZngqj33f+IRgQQEQIABgUCRxzpnQAKCRDaKMI6ef9EdDVoAKCK/YpD
-BNeb7gLmtKkYTVVN6OYbuQCdEQngfxMRec6XfU2bd43K8iDA8yWIRgQQEQIABgUC
-RyHmEwAKCRA/3qp6T+sDocInAJ0Qu5HqplOP6odq7K6Qf6YGkEeh/wCg1j8OSABf
-TYMpLE31HlqI1Gc4kiqIRgQTEQIABgUCRyHraQAKCRBPq0nLRJVA8vUiAJoD4ris
-Wa7b0pn0CUDTMEZY2rjajACeMLTSxRc+djoXMMxq6SBFoAm+czm0N0FsZXhhbmRy
-ZSBPbGl2YSA8YW9saXZhQG9saXZhLmF0aG9tZS5sc2QuaWMudW5pY2FtcC5icj6I
-YAQTEQIAIAUCRBrwPQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEFI0hF3y
-uSD18tMAoNq5oF4VBEoyvwcVm5cp7bP5mx9eAKDlThy8zJPdqoGTrsFp6/mMMNWU
-DohGBBMRAgAGBQJEpBSsAAoJEGzbQ2xyBIUlkpYAn3yUFeO+qVylXqmLoNIo/9j7
-FOyhAKDX1Lz5zUApOwiCFvIgg+oj9crn54hGBBARAgAGBQJFsAxiAAoJEKGZsaFS
-UbRibikAoIEijwo+Q7IsCMwcI5hkc4ZMXKnKAJwLXqAo3+NrQETJmyT35cwWSFGS
-OYhGBBARAgAGBQJE53DCAAoJEKfIw7L3Ht8coPQAoIawwqBwjirFx3i+TLmev45o
-ajKgAJ9b8uPbRSpKRt7g/pJX5T5hmry1A4hGBBARAgAGBQJHHNOyAAoJEMckhjdN
-IInbt6UAoJKrF7r6ZBwtOESf+0yLPZk2IQTSAJ4rF1Axk1JEbwxDhU8xiR+NQs8h
-3ohGBBARAgAGBQJHHNP9AAoJEHadk3JoxR84yBAAn11kwxqkPiDaibQwrSptcLsE
-GtIVAKCeIxBzFC+sylFk6YKnDTEPXLB3d4hGBBARAgAGBQJHHOmdAAoJENoowjp5
-/0R0cUAAnijyMfYTJXrHIqUziCIDcDuvLW7JAJ9mkAdHZvM2gP9xIWuIVjZ12oct
-5YhGBBARAgAGBQJHIeYTAAoJED/eqnpP6wOhtd0An1wd16vE2YrODDnWvRsWN+tx
-uVnhAJ9d1od51VYC19+4DdsNG9jPWnVc1YhGBBMRAgAGBQJHIetpAAoJEE+rSctE
-lUDyETwAoIXpuZG7ssYUeJrXLtaDzjE3O45RAJ9U3IRsunOC4As3YWDXeuwLXyKb
-NdH/AABUpv8AAFShARAAAQEAAAAAAAAAAAAAAAD/2P/gABBKRklGAAEBAQBIAEgA
-AP/hFk1FeGlmAABJSSoACAAAAAsADgECACAAAACSAAAADwECAAUAAACyAAAAEAEC
-AAgAAAC4AAAAEgEDAAEAAAABAAAAGgEFAAEAAADAAAAAGwEFAAEAAADIAAAAKAED
-AAEAAAACAAAAMgECABQAAADQAAAAEwIDAAEAAAACAAAAaYcEAAEAAAAAAQAApcQH
-ABwAAADkAAAAoAgAACAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAAU09O
-WQAARFNDLVA5MgBIAAAAAQAAAEgAAAABAAAAMjAwNjowNDoxMSAyMzoyNTo1MQBQ
-cmludElNADAyNTAAAAIAAgABAAAAAQEAAAAAGwCaggUAAQAAAEoCAACdggUAAQAA
-AFICAAAiiAMAAQAAAAIAAAAniAMAAQAAAMgAAAAAkAcABAAAADAyMjADkAIAFAAA
-AFoCAAAEkAIAFAAAAG4CAAABkQcABAAAAAECAwACkQUAAQAAAIICAAAEkgoAAQAA
-AIoCAAAFkgUAAQAAAJICAAAHkgMAAQAAAAUAAAAIkgMAAQAAAAAAAAAJkgMAAQAA
-AB8AAAAKkgUAAQAAAJoCAAB8kgcA4AUAAKICAAAAoAcABAAAADAxMDABoAMAAQAA
-AAEAAAACoAQAAQAAACAKAAADoAQAAQAAAJgHAAAFoAQAAQAAAIIIAAAAowcAAQAA
-AAMAAAABowcAAQAAAAEAAAABpAMAAQAAAAAAAAACpAMAAQAAAAAAAAADpAMAAQAA
-AAAAAAAGpAMAAQAAAAAAAAAAAAAACgAAAJABAAAtAAAACgAAADIwMDY6MDQ6MTEg
-MjM6MjU6NTEAMjAwNjowNDoxMSAyMzoyNTo1MQAEAAAAAQAAAAAAAAAKAAAAMAAA
-ABAAAAC0AAAACgAAAFNPTlkgRFNDIAAAAAwAAZAHAJQAAABAAwAAApAHAMgAAADU
-AwAAA5AHAMgAAACcBAAABJAHABoAAABkBQAABZAHAHgAAAB+BQAABpAHAPwAAAD2
-BQAAB5AHAMgAAADyBgAACJAHAMgAAAC6BwAA///////f//v////v//////7/////
-///////9///////////////////////9////AAAAAAABANEAAAAOAN28hwCGDCUA
-cOcAAAAADAAAAAAAAAAMAAGqIf8AANgAAH3VlQD+AGB9QKMOrQAA/Z8AAABKMACI
-fQVKMFuIfYq3AAAAAOfGAL4AgQAAAcEAAAB4AADYPgAAlQAgvzBbircAEQBSXsTY
-owAAlZkAAQAAAAAAAAAAAAAAAADNAIoAcABsAADnxgEA8AAAUmFaAHzEg////ywA
-AAAAAAAAAAAAAF4FxAAAXP/QgHAWXmAFxAAAAAAAAAAAAACKNgAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAbQLcAfQAM8AAAcAB9xgBw2XDZcBZAAAAA3NxzRgDsACAA
-BwA5AFAAvwA/AC6Hf7e3xU7wTiRHxXc5TrQAAEz2TPYENdcAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAA/wAAKgBaAJEBAAAAAAAEBQFQAQcA/wEBgSz9by3DLS0AAE+4KAxi
-9uyjhwPwRAeJJHQ5Hicf/3sf/u0GfgBeai13IYZUvb29vb29vb29vb29vb29vb29
-vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vSAIGwAB3Ny8XxAA
-AAgBAfkB/AgIAMQAxADEAMQAAAAAAAEAAAEBAAAAAAB9AAAAAABPAAAAAAAAANgA
-AAAAAAAAAAAAAAEAAQAAAAAAAAABAEAAAAAAJOygz2kAAAAAAAAAAAAAAADTAQAA
-ABv//wD/AAAAACQAAABYJAAAvf8AAM3EaQAAAAAAAAAAAAAAAAAAAAAAAABAAEAA
-QAAAAAEbVRuECO8AnwBnG3obPRt9ARcA1gCMALAIPxt2ASIBTABkASsBVgg0GxEI
-QAE2ATcBBgFfGx4bPAHBCFkB2QHzAR8IuRsyAVoIGAHyCMQIXAESCH8IYQhXAWgB
-JQHUCAEI4ABlAFQATAAAAOcAwgDjACMAagAEAAAAAAB5AMQAUQBAAAAAGwAAAAIA
-xADTAH0ADgBAAAEATADEALsAiAC2AEAAfQBRADMAVgC2AJIAGgCsADMAIwAwAMsA
-4gCHAFwAywCOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAA2NLYZF6aXuZewA7CXmZesF652LnYLtg12KvYyNiF2FIA
-owVWBSKKMnCOcAOKj4qIBboFxM156s1WkQRf518OJ16dG6wb3BtPGxEbtBslG0Qb
-RxuRG2wbDhugG+Ib8BvwG+4Ao0B/QLVAeH092HB9J0AfQOpAgRtyG4UbhhslG+Eb
-PBuaXvsOqQ4aDpQOnQ4pDjIODg44DiNeTQ6gDrZe+w55DhoAo4o2ikNwkXAUcO22
-JLZJto9pXGm9aQqIKIhfiD8gaSAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAEAAgAEAAAAUjk4AAIABwAE
-AAAAMDEwMAAAAAAKAAMBAwABAAAABgAAAA8BAgAFAAAAHgkAABABAgAIAAAAJAkA
-ABIBAwABAAAAAQAAABoBBQABAAAALAkAABsBBQABAAAANAkAACgBAwABAAAAAgAA
-ADIBAgAUAAAAPAkAAAECBAABAAAAUQkAAAICBAABAAAA9AwAAAAAAABTT05ZAABE
-U0MtUDkyAEgAAAABAAAASAAAAAEAAAAyMDA2OjA0OjExIDIzOjI1OjUxAAD/2P/b
-AIQAEAsMDgwKEA4NDhIREBMYKBoYFhYYMSMlHSg6Mz08OTM4N0BIXE5ARFdFNzhQ
-bVFXX2JnaGc+TXF5cGR4XGVnYwEREhIYFRgvGhovY0I4QmNjY2NjY2NjY2NjY2Nj
-Y2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2Nj/8QBogAAAQUBAQEB
-AQEAAAAAAAAAAAECAwQFBgcICQoLEAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiEx
-QQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpD
-REVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJma
-oqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy
-8/T19vf4+foBAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKCxEAAgECBAQDBAcF
-BAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl
-8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6
-goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU
-1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/8AAEQgAeACgAwEhAAIRAQMRAf/aAAwD
-AQACEQMRAD8Av49akQDIrmNAYUqdRTAGAzSoOaAFIpAKoQwClHHPp3pgI9xAvDTR
-A+hcUqOknMbqw9iDSG01rYkYc9KaBzTJFYCm45oAVqbj0poAYcCmYoQhSMGnr1BF
-Zlg3BPpSp1oAG6mlQUxCkcmkHWmBn6pffYYQVUNI5wueg965u4up7k5mlZ/bPA/C
-k2d2EpprmZAcYpUJB44+lSd1rkvnyq4YSybh0O45FdLo1617bHzDmSM4Y+voaqJw
-4qmrXXQ0W7U3HrVHAKw4FNpoQMOBTDxQgFb070qVmWOYHcaVODmgAYc0q9aYhzda
-bjmmI5vxJJm7SP8Aurn86x6lnqYZfu0GKcopHUkBrV8OSbL54+0ifqOf8aaOfEr3
-GdMRwKbWh5Ap6UyhAKeVph5oQCt973pU+971mWK3DU5aAFYc4qMTRK+3zY93TG4Z
-piI76/t7EKZ2I3dABkmoItYsJsYuAh9HG2ncuNOUldHM69c+bqcrREMvADDkGswS
-uT941SimV7WpBcq0H73/ALxpRK4/iNPlQLEVV9oUzOPQ1e0e6C6lbk/L84HtzxUu
-FtjX605JxmdsegptNHGL/DTDQgF/hphoAHxu96ctZljnGeaRaAOd8TXVysgiV2SL
-jIU4z9aw7WS1R83UEkq+iPt/OrhsSzdm1O2vrcQyQYiC/Iw6rXPysFchG3L64pR1
-ZspuC0GCZgOOtTQGWbhE3YGTgU3FG1PFS2auTWscl0WEaDK9QTipWsZ+8GfoRUPR
-nWqsJrVET2b94HH0qBoViILFkOcjNUpMwnSoPW9joU8TEqA1urY6kP1q/Yaqt9Js
-W3kXjO7qo/Gmmc86CiuZM0f4aaaaOYP4TTDTQCuOhoWsixLqYwQGUIZMDkCs3+0p
-JAPLCrnv1qW7FRjcr3EpvAI7pFbHRhway7jSVEg8mUKpOPn5/UU4zsNwJY7JRbMR
-IWKqdyhfmGDg8frVb+xpgxDyxhfUc5qk+W9xP3rEEuk3KsNu2QZ6r2qZLW7jBhUA
-B8FnJ4p86Y4xaL1o7WvyxJA3OSUk5NXnuBs3rEAfWpbN4vQoy3UjsR+4T/efk1n6
-grGMFgMg4yKpbmc22mSaFp01xfRM8TiBTuZiOD7V2wAAwAAPQVZztvYXHymmUkIP
-4TTTQgFPQU1Tg1mWPY8Cs29t7VTkDbK3QKcfiaTSsNXvoQGNEjGFaVh1PY00gNCJ
-Y025YnHp/nFQasRXlBDbTnsRUd8WSzkZOHHI470LcT0IYrhhCkjAMHAORxzU6Kkw
-+dQc9u1O1maRVyUwRxoSI0UDnIGKhxmHd23ZpsuyRMYA0e/y0YEdQoqARKZFUqCp
-OMEUyZJWOkRAke1eg6CitUcQfwmmGhAKPummHmgAP3RTQazLIbu78s+VEN0p/Jfr
-VTasYLyksx6nqSallxQ6NLhyGDhM9ivamyKYjJuwMYbisI1lKXKi3qMWZGYKMGp7
-eEvk7dytkYIzx3oqVORXQrGZf262c6Qpny2BIX+6fb25qe3+UA+laQnzJMuDFuJz
-Mfm69hShytoVaNfY5rQq5HBMy8DoeuKntRuvowR3zj8KFuKb91m4p4NNNanGKOhp
-poAQdDTTQAh+7VK4vD5hgh5cfeYfw/8A16yNErsgA8p/U989asWn7wF8ZOT+Fc9e
-VoWNLWRYaVYiAzJn0ByRVa7iknBe3Zd2MYzXFC8JKTWgzKiWRNSW225m6sqjgDHU
-10cA8pEjxwB19a0xM9EgWpBqNqtyhO0b15UntWbFgD6VrhZXVhoJQAcqAGHTI6j3
-p7SEW4IigDkY+70rssUrNbkUGFGDySdxNWrBd15u/uqTTW5FR6M1h0NJWhziimmg
-QCmGgCheXmMwQnMvc9l/+vTbSFIU/XPrWRskMnIlcKoyxOAKrzWlxHCysWEaEsCh
-4IP61nKN9SnKxBHYSMu5GyO+OtdXbxwpbrtUbVWqoyjK9iajMjw/FDdfa74hlkml
-YAZzhR2pmsyS2t0iwu3zLniipShvYUG27EVvq05cRzqWU8FsYIqCCYs7BuDk5Fc9
-OChJ2NSSQENnPHY+lP2BouXww6eldaGmQIDkk/iau6bJ5fmMyHDEAGolUUNWTJXR
-fW7g6F8fUU4TxHpIv51aqwfUwcWSKc8ggimnitESAppoAyrW2Ea/1qSZxGpPSsmd
-CFtVMdvJdsuW2kqD6f8A16opdmR18/5i2G9h9Klu2glqy9E4RyEABNO1a5Ftpc7K
-SrsNox3J4rlldVY8r3G9h+kp9l06CNc7tm4/U81XvZ/NlAyBgfNu4NVGdSbu9hJJ
-FZJduEjAb1JqyLRZACysrdmApVanLaxpFD/s8qD+Fx9MUzyGPSA044mLWpVhfsp4
-87Kj+6o/rUhO1dqJtFc9Sq5vyFsR7Q3Tr3FKI/16VndgPUNFhkYj6VbjlDnaeG/n
-XZhquvKzKpHS5IvWmmu8wKbHYMCqLnz5wpBKLy2O/tXPKSW5020LrPLIu1P3a4wR
-1rPuLTy1VUz/ALPse6/1FYe15pW6Ao2J7RhIqseHHXmnahZG8SJWk2qj7mGPvVlU
-k41FJlJXVjUsMK7KcDcoPPbFU9Ts4p7vejAHGG9zW6mlQTZH2x0FvFbjKruPdjwK
-l+0DHIFcE5OTuaIUXK4PrSG4Kj5uakY03CnrzTCEk/1ZwfSmIjPBw42sO9OBxweh
-9P5igB46lT36GmyKdgI4ZDTi7MGT283mDB+8OtPNezTnzxucslZma5aRti8E9/Sp
-kVYyVH3QAa4K8rux0pD14RfrikmQSKQehH5Gua+oyxY2EKKJyMu4BOen5U2YDJA6
-ZrtxUUoIyg22xoGR1P0pGlKD7mAO9cN9DUrvewsRvmX1AHNNWeJsMr78jIKqTVez
-la9hcyFNzEGwCxbGSApz9aaLiNidockDJ+U8Cj2cg5hGu4F5diOCeR2FNN7b7io3
-EjrhSaapSYuZFmOTzogwHmIeQwOaXGB0JWs7WdihV5+Un3VqkJ3puI56NQBWB2Pz
-ng4OP51bV88Hr6+tdmGqWly9zKorq5UhUq2G+9nmpBjfg9CMGuaUuZtmwKMxMvcZ
-ApQ25c+oqOoiXe4jCbjtHQU1m+79a1lOU/iJSS2EB9Diob5zHZTEn+AgfU8VMFqh
-sxdmZNvzBg/3RkEYwMn2wDVi3x9jlLBiHVR8pwcsxPX8q657GaFKBXeNiqqp+Zdw
-GWHTnbjj0ochIj8hZZxgqQFIweDx259KncY+7tpYbSdjsC7MEDBPUewqm6F5zs3s
-3mExhQec45z+FOEk1cTRq6Yw+ycggh3G5fqaueYw54ZfUVyz+Jmi2EKrJ9z5W64P
-ehGySGGCeCKgYxlw4JGexpw4Hupq4ys0xNXIs4O4dN3P0p7DO76CpKGxPlnPuDTo
-udo7Ak0CY5364ppyy49OaokeOxxwaZcqj2zrJGzqf4R1J7UR0eg2Z4t4jGubSZuO
-V8xsZ54/T9RU6xq03FoRhlAySAMd62lJv7RKXkBjLbma2YOeTtLDcfwpwtk/cObX
-k8uMFivp+tTzW2YWHmS4fg2+FyQcjORg/wBarm3XB3WUY4/hHOcD/wCv+VJOMdpB
-qyzaQ+XbqEUR9cp6GpsOvODWctXcpCcN22tSkbx6OvepAMgsPcZpucOPQiqQM//Z
-/9sAQwABAQEBAQEBAQEBAQEBAQECAQEBAQECAQEBAgICAgICAgICAwMEAwMDAwMC
-AgMEAwMEBAQEBAIDBQUEBAUEBAQE/9sAQwEBAQEBAQECAQECBAMCAwQEBAQEBAQE
-BAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE/8AAEQgA
-egCiAwEhAAIRAQMRAf/EAB4AAAEEAwEBAQAAAAAAAAAAAAYEBQcIAAMJCgIB/8QA
-QRAAAQMDAwIEAwUGAwcFAQAAAQIDBAUGEQASIQcxEyJBUQgUYRUycYGRCRYjQlKh
-M2KxJXKSwdHh8BckNEOj8f/EAB0BAAEFAQEBAQAAAAAAAAAAAAMEBQYHCAIJAAH/
-xABAEQABAwIEAgYHBgQGAwEAAAABAgMRAAQFEiExBlEHEyJBYXEIgZGhscHwFCMy
-QtHhUmKCshUWJDNyokPC8ZL/2gAMAwEAAhEDEQA/AL3oaCnFeK22oNKA3JTuI54U
-U+oJ4OOdHdAaaXKjEICHFILYITkLWQcIP0UOAfprNTyJb05VZuYkid61VOKyoh1K
-AnwnSlxO3/D3cEEfQhOnm3ikPxmnGs7XklKinnbnBAP03D66OUAsAnuigZjmNJqt
-FZS842tCdm8htzGVtHuOf6fTGnC3o6G5KApfCXQtJQdq0Z5woY9/XsfQ6WrTDQM8
-vlSdClFVO89tBdcKUoCvGOUjuMH1H/nGkrMdIfSoNpKVpKQdudmRxn6Zxp5AHVA+
-FI8xC6EAw0hbrjjaShS8OpyFKbUk4H/TP019OvQqWl+bNlQqfDjK+ZXPnSm4MJhJ
-5JU+4QhOCFdzpepIykHaKEjO44lLYkkiANSdogDf1UF1z4nfhypSG2qt1z6P0+c2
-CzIirv8ApjjqVJVyFBDqsHTlavV7o3erwFodVOm1eec+7Ho160ydIczyAGUvbzz7
-D8tJmLm16oI60ZuU1JrvgrjOztTiV3hNyi33zqYcCY5zl0HidKliqPRlCC2lbe9b
-P8JJdSlbpHOG05yoj/LnjTS2ylSidnn3e3JwexPodL7aDbJjlUTckLP19fXKiCox
-m1x2XC0PDXlKsjBHA5+mCDoZlx0Jd3bEhKxuBwBnHHf/AJ6LapHViguqUFTSyY3u
-gsqLYQpC+VJ7q47kfhobVGaeVuQA04nKioDhR9yM9vw/volqiGdOZ+NCcWSvXw+V
-b3o4FJUggKV4wykgFPrgg/kdCK2ElS07UkpyEJBz2PocenfRbRPYMcz8aA8da0iP
-wP4RPHfZ3/trNfSjlScExuKWSo6ok0uhIAWVBaSRt3A+YfgoH9c6MKCNsmK+24pb
-S3A0plZzsJydvv7/AK8arN2epFTsqAUY8PlWqptliW8tPnjOKLKwTnZ7IUO/0CvX
-8dPFvBlEtsLUotK/iIc2eIoe2SBjIPGeARogVLMDl+lBJ7WlfNR2fMvoebWhwOKS
-jAwhWCRnH4c406UNnO4K3ENAPNqSnCkAZxg+3fB7jS8E9QJ8PlSYGVb86dp8JaJM
-hW/cCpQIAyQRyk5+o02spb+YbS5kIU4ChYJBQcjIP0507p/2PUPlSMntCqF/G/8A
-EtL+F/pyq4KDSqfVr0umsO0G1W6mlSqdT1tN+LImyGwQp1DSVNpDIIC1vJyQAQfN
-B1R639U+sdRcq/U6/rhu99Sj4UCoz1ig05JOQiLT0bYzSB2AQgdhkk86bMXu3A6L
-ZvYRPmRPw/Wt5+hx0c4FiVneceYu0F3DbnUs5hIQAlKlLTOy1FUZtwBAiTUPOyAh
-tSGQlvcNvkSEfhgDX7SnXkymt6WyTk71NgqH4HGdNAKeqM1u95ht25Q2nVOgI8yK
-NnK3V4suLIj1eptvxMLhupqkhLkUjzAtL35RjGcoIII16Df2X3xV3X1loV19Kuot
-Xk3BdPT+BGrdv12ovl+sVmjvOfLOMynT5nXYb3g7XV5WpuSkKJKAS6YG8pt1AB0V
-ofl7/rWsg+lhwJhr/CV1ilswlL9mUOJKUgHKpaUOJ0A7MKzEfxJB512GmIQiBHVu
-3tLJwpQJW2cDkH6Y50NOsFsDxAHI61ZQpGFFHPt6en059dS+20ZHr+NeZrp7UfXd
-S11vfR1KICtroSCAPMMHnOhZbR3BbZACPv705x6HIHpo9qZbPKT8aTuaKB+tqXPR
-1fZijkBtRCsDkIySBg+2hCZHU0pJJxncMpHORyD/AK67tIhUcz8qE8diOQrSFjAy
-0knHJCwAdZo2VRoITpt76XzgZC3o5GZLLu5oEYceAJSpOP6u4I9x9dO1vNKLrakK
-OUKS4pCj22kZVjvxyCO41V60ktA/XdU6V+KKX1SO+zPkKCSW/EO4p8yFDPB4/Ig+
-+nuiN+BKZkJWwlh5OFlCTsBB53JByk++3I+muwJYSoeFBEZor5rMdTcx5SQnale9
-Rzk8kALH4D+2llFbHirLbS3MD+MhK9gT7kDsQfb/AJ40uGtsCBy+VJiPvKI6p/iq
-ebTuQQAscpIwMYP1wRz+uh1bSQ62sBS2lLH3RgoOex/TTugfcgnl+lJDqrSvPZ+2
-bu0/+ofTOxm3gUUq3JVxvtJXuTvqE0ttqP18OAODriWVFQ3bsHv9e/bTDfKKrlaj
-zA9yRXqN6J9ipjonYWNOtdeXP9eT/wBa2NxlOcryBnHGTnP1+mnuBAJWlQ4CTndj
-OPfOmx54AR3f/K1nhGGKCgtR8ffS2oAFSVBY3Nny4GBxn/z89Xk/Zp3m5ZXxddN0
-eMW6ffrNQ6fzQVYQo1CKpyMlX1EqLFx/vDSjDXMi2z/Mn4iqf9IbCU3fBWMNgTnt
-HgPNLZWn3gRXrIcaS5SULayQl3kEYWCQeP7HA0xoCmVjcMNk4KVcJGePX0z31Yds
-mWo8T8a8VnQQv2fAU5PBJprgwPK4HPCBy0cn0x6EaEloCFjOdqjuB24CR9f+mi2g
-7KgeZoTuqgfL5U4toCqdIb2gkIKQV524JHt6cA/roQko+8woYUkhSFnkAjjB+nfB
-9QdFthOYHn+lAegR5U3/AC6vVgf8Os0eB9D9qAEuRp8aXVFKXKi6pP8AAkoeyVgE
-ofHrkDsoADt3A0QUFDxqCnCEIfLWC2oBxmT2G5JyM7gMZ5+vrqqyCGgPD9Kn5y5i
-R9bUsqrao9SeZSoIbcXvSlYynnO0n6HkH8dO1IcWneAxggpBbWkZODkpJ9SRnBPf
-jPPczY/0w8v0oB/3Naeqo2kS3I+0uB5OY4SN6sLTynAzyATx7D6ajSB1FsOm3AaE
-/f1ix66z/BkUSTedMYq4O4J2riqfDoJx2Kc86cWW1OW5yJnQbAmkrisrkk9/6UI/
-Er8UvRv4YaRSbh6q16o0415GaPSaJQ36/VawELbbdUhtG1tKWy6jct1xAG7jJ4MD
-WT+0j+C2/ltt0/rRS7UnPqH+zr/pM2ztx77Q862Y+e/Pi40tRcsJT1c6iAdDEwDE
-+RB9dTbAejHjPinBV49w/ah9pKlJyJWnrSUgElLZgqGsdmVGNE15z/2tnXCLfHxU
-3NUen9Xp1ftumUWnUKm3DRn26/SJqI0RsrXHkt7mlJU68+cgnkHXKGPfF2SJrbTl
-dlIQTylCWWQv/LwkYzyM6k2DYLhN3Zi6fQFqUSTJ0GuggHTSN6kLfS50wcB4ZbcG
-4VcO4cywDCAyEOKKlFaipTqCs9pRgCABAjc0TuXBciRvFaq3PGRKVtAIB44xpwi3
-tdkNGGrgnFWQdrqm5A/MKSc9tPTnDuAvoyG1QR4DX2jWgWnpB9OGFXYuWuJ7wObw
-twKSf6FpKSP6ad3Oo10x2VuvOQZ/hI3LRJhhpS+MkBTZSc/rqXugfXiLa/U7p7dc
-1pVEk2xftHuBM1t8uwmflqhGccKjgKSNiVZPIAJzxqO4jwZbNNl3CSQRrkJkHwBO
-oPKZHlVy4L6XHEOPWK+Guk9ht5p1JR9paR1biMwy5nG09haddSgIUN4VtXvj8Rh6
-jl1hbbrEl/x47zawph5pY3NuIVnlKkqSfwOdDikglYPG08ZAUU4/1HfjRbQSyTOs
-n41i92QoD67qdGWwqnukpTubTnBxzznA+h5HtoYmto2oKANicoIxyM84Pto1sAMy
-f5jQHe7yFZGWVw5LSsKwgrA9cD0+uNC7+0qLawFZOEKH30d+M+2PfRbdMlZ8fkKE
-6SAB4Uh8Mjj5lsY4wdwI/LWaLPh7/wBqTa0sq6Iyqi+tG5h8rCg3uCmxnnIJ9OP9
-Px09UlG9TbqUNpdCghbYTgOk8YKccK9PQ89/eqolhPl+lWCdFwKIbhgpdBlN7VAM
-h5B7qAAyU/mP7jSCmOIdylSlIkNtktOoVw4ByAR69u300ZoE2/qoCoz153/2zXxB
-9faJdMTp5bly3FZvTIIis1qHbc12iO3G3IhtSGHZ0lkpdcZddLyUo3hGWUpIJJzx
-Q6JXd8OdGrian8QvTbqn1HpomKcNNsG9aZZsJxJHl+becirmPEnO7wpDGR2VnnU5
-4PU9cYCtdqoJf6xxJkSBlWQkRp+TKfXMGaasQQ2i8T1klOVJ9oBPvmu6F9/HF8O3
-xKdKaX0mujpRMi9LItqN0zp3cFIWqTdvT56FFREjpCZCyXCltlDTwW4pLwbOSSpK
-0+fbqLUqZb90VCmWlcaLlozClBE+FDcjONKCsKZcDqcLKcEbmyR6ZJGo7w2V4rjT
-tpeMkFsKDkEZQpJypM95VB2/hB2FXFhXGPEXRvw64MCuUAXRbW2VJzFJiV5UkFOo
-IBJnymgaP1ErkNtSG3W3HAopYadCo53YO1BCePNjGcak/p7WrzvyUxT6BaDtfqTk
-VUuZHplJM4xEpBGXF7DgE4G5RCcqxnUixThfD2m13LbxbHfOw9YIO9XPwD6WfFq3
-bbCuIMKavQkAZhoskEapStK0gxuAUjxAqX+k1uXj1fuO4rRtizmBXbWbS9VYFUfb
-o5SC4WQEFSwlR3pWkp4P8NX0zL1V+GLqyzhE7pN88eUqMOfCqAVg4OB4hVxj29NQ
-m6trvCrvI1ckHTVJI3AI5d1a2wLpb6M+kTBQrH8LAbVKS3csIcSMpKTsHREg7AaV
-Gla+HW6PDeZmdJLwhISSl9UBl3aPf7mR7enrqEq90joVrJWbidue140rc0j7bjpY
-bUogkoS4ttOcAH19NPuG8S48ghhRDx5KHa9o1PvqmuPOhz0ZMcBxDD73/CnN1Flw
-Bru3aeBSnU/lKK7v9E/2y8qx+mFh9Oq500p/UD9zrTgWu9drPUNcSs1/7PjojIlP
-tGI42lxaG0bkpURkdzzq9fQT9ptZHXq9bfsOmdG+qNOrNcnIp5qVGXEvOi0grODI
-mrZCHWY6M5ceWnCE5JBGlNpiTjSgxcMqSonYa7+Bg1nLjD0esMw3CHuIeGuI2ri1
-QgrlxGSUgSIcQtaCTsJABPnXUuGQqDKI4Owgt5ynv6e4OO+huQgnxHG8KQfK41u8
-/wCn46fbfQr8/kKy67PZjlX7AQ2UzCnOPAKgSMEHjH4euhGXjcQUp3KJHGQO3bHo
-dGtyc6x4/IUB4aJ8vnSPe6OPEHHHLeT+udZo2Uc/fQIJ7qcLgY8OYh/wj4a20bkr
-IURuSOCfr6Eeut9MUG9vmdU0ojDo7oweNx9CkgYPqPX2qlGtuJ+tqny5DkfXdTR1
-16pp6Q2JP6gvWxW7vhU1iOqoUygqQiSlpZS27McUvIQ21wXcBWAoHHcjnlJ+NS77
-jiRjZlOoFAckx1SGZi2zXyjZ5xguFKCCnCfuZyCcDtphv8acsyLNpHbKQZOxExpz
-jvnmKkOAYExjDyi85ATGg3Pr7u7aq2dY7yPxOU+k2d1ztCiT5MKKafAuy3mm6HWV
-MuLG6K8lYUw6wrzLAKSULQFJKCTrlp1Y/Z9IpV1MR+l1+02n0KqVRijsQ+oszwUs
-vOJYMh5uox2y2IrJfAKpCUqw2s7iMZX8K8XXGEXi3bpGZh0jMBp2gAAtMnQnRKh3
-iDuNVON8HNKhi2WQ4n8M66TJTpuNyDvOlSzaXwy0ykdK69V6d1Ah3ExbVCrj9dot
-GtyYu9KW9Qp32dU0roawJjZb3omYebbSmIlT6ihKTmscD9nf1iqM1t2VefTaDak5
-LVRgXZBrL9TFVYmstS6etiF4La0uSGJDToZeU2ppK8OBJABlOGYgvhK4xC8xpAzO
-uwAhQVC0gLKSe4AOJ1OomCAdKT40yOImLGysT2WkalQIlJhIUOeYpMDw3qGOofwI
-9frWqDDlNhU2+KWKk3Dcm2jL+ckUxaiAVTYZPiJQkKAU42XEpyMlORqVrQ6P/Ep0
-0TPsW2bTlfbPUWmxmKrdb5jwaXSfCcIcEd3cFbA0SklSfKtIUPvcO73FuBY1bNtr
-XlzGVJO4KCDl8ZjQ7R46UXhzh7GcFxFd00iXIyo5HOIzSdgkEyN5EVeToTGuP4fv
-Gh0PpT0prdfnvNya/VLc6v0yZdU1SDtS27DWPK2jcsCMMYUVE5Vk6vjUer9DqVDf
-uQ9PY0WvNtNuOTVSiYLK9uFtlCfIlSlHBOCkbQQO+Y1il5apuTJKioT2klJGggjm
-I2jurQXB792qxLKkpbS2I+7WFhWokGPwqJ38zVP76663pVqi7S4FtdKbfkvsJW0/
-dvVGFRpMtStwIbhpTvUSUKABI5Sr2OKEfErGuyqWLWHrkpEJqXRahDqbD9Omrn0x
-9C1rZeCTjtheMpUUlKgffTjhTdm3d2twgqzqUCJQUggwDBO43127qg/GON32I4Zf
-WzqG+rSlSeysLUCB+YDY7aGoA+EL4Z+q3XfrN0/tC2bWu9+2ardcGNdtyQKHIl0S
-36aH21TZEuRtDKUpZSvAUsFRUkDvr3mWnZVrWHRYls2jb9EtmkQIjcOPEoNJjUeM
-pDSdqPEQw2gKPG4qIJJUfXOpvdfZ3bvrGwCoDUiN+U8xp7azrcYriq8Nbwh99Ztk
-qKktlRyAmNcu07xO3dRvBYUGZKcZWGidvrjHJH+uhWR5VlSSM9vbd+ek9vEr8/kK
-Z3J7PlWyIoLEjGR4jJxxwR9fr30KSmyVEH7yTjPqO+M67t5C1jx+QoTxBSKbC4AS
-OODj7qTrNKITyNBAVGgomqobkR2EZBdXBSlpeNpdSAcEfUFOPpjTLS5K2XgkJ3NO
-8KHGEkjv+uqpbEsVPHJzj65UV1gtuQYyH2m5EV+L4L7DqUuIUPMhXB47ZGDwR9Nc
-zevnw+dBbVLlTth2sWffdxJclUe1bQrEeFRJzqiQZ0yBJbcYiQ0q5ceaLeSkoQlS
-zw14rZWlxYB24kFH4SN5OkciD3j191L8IubxjEUJsT2iQPDxJ8hPvqFaxZdl2zbl
-MkPR6t1NuCN4P2z9kLVHokxQSjxUocQEttdztJWrJCRkZzodmQrbn2NBv6z7Zapk
-F+76hVaVRpUZL79LbWpMJTTratx3oXATvGTlWecd4s/bt29uconUa+r5ke6rSvHn
-VoZK3JM6924pPb1w1+BOaq7FtORZz7LrLFYptttxnXm3g8mQgkpSFIdStYUDlLm9
-W4HJ0MdfJMuk9Euo1fpTaWbmptOer1LYk0nxmlT0tFxkOxipSVblElZVwfNk6S2Z
-+13bbdwDlWsZt5MkSZ5nnSJ4JtGlPW5GZI02O2oBqA+nPV6uNdOrTvy4qPT6rCvC
-k06TUqhbCkU1mJUFMrZfjuwXCdigYqsracKFbQChKgU6nKhU6g9SY8hdfprSqbUk
-BtNLckKS14ZAP8VSCCQrjcgYB4Bzp5awk4fijhQ5KEk5f4hBjXu059/hU7wC1/xF
-DaSRCkJUeXaAJEe0eVSLA6A9M6FTJi6B0ysiko+X+cmz6ZQzCYn/AC6vmEKlNle1
-YbUN6eBg86i1mMmo2rfcqOl92Gqtx5rEhhO5khBQt7AHlIKgs8ZHfTte3N5eOIdu
-VEmIk6mBA+hyqb2eCWGE260WaQnMFEgQBrEiB36fCj+J0ctm57WiXTJ6c2RcsV2M
-rwqwbdjVB2HuK/E2JUlQb3laioDAUpSiQc6hepdPbZZcZpTlv0WZbYdJVbk+IhNB
-dWAcNOspA2tEgZSnA49NL2bi5WtkvrUcsQJMDbUUwY1gOGt4a9eMAFW6hA12kHzH
-tr0MfD/09trp10rtq2rXgtUykotmI+3AiITHhJW5HbeWUtJATypRPuP11IbpRy06
-AodkrPlWjv66suyBDJk9/wAh+9YzvYNwpQESTpy1pTDCkIk4PifwVBKzneP8p0Hz
-EArCgAUq5URzn8vfRrc9twePyFI17JIr7pgSFvIVgpWkqT/LyAf+/wCmhyWoLK0A
-grB8qh3WAePzHtozMB1fmPgKC6OyPrlTQS3k5SnOecnnWaVCYoEJ5ilVUcWqHTnW
-eHkMFW0cKc2k8p+oGQR6jHrpBEkMqU3JSsZUoF9vG7BPO4D2PqPrqpmdER3a/GrA
-XBVNRB16+ICnWOINkWbHZuXqTWGiKfTm0KegWy24SUzKiAR6Hc2xwV8FRSnlVQZF
-CotJh1e9Op9Zfq9ZkqS9UqvVl/NFbifuJICSnaM7UNpTtTwlCUjjTBePOXAypP3a
-PZP7be2ppwphYW717g7Sh/1ET7fhTnaEfrNcLkKqW07adMgTwQ3TK9R5Km3IylFL
-Diwl5Dzbq0jJQlCkJSpGVbtwDZcNGkWk7fVOrlMiUaNRFwbviQY7yTToceoqc+bd
-ZCSAWlOsuuecJILisp4yaVwrpZ4X4g4vueCrNalPtpKkOf8Ajcyn7xKe+EToo6Kg
-5dImX3Sra5t1FlMFCkx/N3e2huLelp1CdGpMB9uY+VhGWJSPAawgKJThWFgJH8ud
-pCvbUqdO7C/euVKlz6ELjtqeapbseBVobr0esqlNLYkPFLagUpjtPvpC1eRKwrJG
-ASfj3pDY6O8DTxIlpLzgcQhttRypWsnMQSNRCErVtuBzpscsHLhhy2WoplJk949v
-nXOz4meiUH4abks/prQH3UdOr0m1evQLWltqcRYVQjMxpzkanzN292PKNQU4GZIL
-kcoWlK1BWBI/R+KmmxKY7JbWsRo6FltPCzhIyeePTJJ7amvCXGLXGGFWHEaAB9qZ
-S6oJOYJWT20zv2VAjXUd+tTzgK5WyequPyAoB5hIGX1xG2lSP1T6sv3zFcpDjSI1
-PahJgUyJTY7cCJOSDsUtxCcb0nbjKuVkFRJGBpHYdUepHTS5qZV+nM5YkRHpUCpQ
-pgbgJAcc3BSTjByQNqs8EEZ1Yi2VOkOJIUsiSmQANBr/AEjWO/XvqTPYwCQ2whfU
-glIVBVmMj+4yJ7pBqN+m3VKt2hIkMRlvwYslJZq8CFIDsQtKABcU393egY3ccgEn
-kDUl0yLFuXqdY1KlwVyIVfu+mQZkVRUkTWn5baXE5GPvpUoEj00NhQ+0tsg5kkyP
-CSNPnSLGsSbPDN5mTkfSmD4wkQfbI/au9dCUy2h1hpCGGW2CyhhI2paCU7UJSOwA
-AA+gGmyUVlRQFAgkjgfqPofpq1WNFLCe4/IVi53UgmvumlxanEbipW3BGcbuOCf0
-76YZLezxMEKSVZTwRj3GumdHVjxHwoK9k0npxUJBQpXdOATnaoY9R/VoenD+KpCi
-lDgOEuHgKx2599Ea0eX6qG5BSI+tqai5JBI8NB577e/99Zo2lAn+WvyqBxVNh7du
-9pxaU7FFSkjyqT+Pf09tVB6p/ESikXE/0u6eFNR6jNtobq9VbifOU+zS8QUocSAU
-uySglYaXhLQKSsnIbNSuulu2lP4jt58/nVkWlobu9Sydtz4DT686rrTQ5alxS6lM
-8esVepvKeqtWmrU/UH5SlbnXJTiuStRwTkj0wMAAWF6QNwr4YduV+nCrvu1SQ1SF
-uMZRCQy6uMohKkq2rUtt0jYnfgp8yQMHN/T7xDd4TwB9isXC2blwNrUDlOTKpahI
-1GbKAeaSod9WWq2Xh9kHWhBVCf6eXrqz1QnRrM+zU3JT4dKQ4lyTATLnIclN+coW
-lTZT4iQsDcnGRnHvjVUfiOs7qFd7NauzpJBiVd2Tb7lGm26wqPUnX0+M1KjylM5w
-4N3zCHE7htS+CM7CjWHOjrG18GdIWG8RYm24MPWXEB0oORQWnJIMQcqlSYJMTpNL
-rdhLtoSSJlMidRH0K5SW49cdofELYfTKp26611GuV2NJTYFMo0qRWEsVBh1xt1Y8
-ZxuNGU0y4+p1ZbCGELWoBBSVei7pJSkWJZ9r2VMZbKogebn1lLCWnak9IddkOvBH
-GEb3HcJVnASkHGdWr6UHFdnYYFgeHskEXIVdogky0UlDa4iQFdvJMSEztFKWVtYk
-84m3MhuEqPdI3A901C3xkfD5bHXGhv1T7GRLui3nnqrZcxttTEyivOJCSvxRjxEP
-obDTja/8QK3A5SkHnJYUViNBj+I0UPwwkSIixkeXhQVx6dj/AP3Vi+iVxE7ieGOc
-MXa5FskFsH8qVGSB4ZgpXms0osHvs6XXAPzSfr1e6vq+KDb6n2J1Np1LkVSkq8en
-isRw/ArcNZS6IMkY5SlYUptwcoUM8jI0ZxKtbcfpQ7UH+g/T4VPD8JqNJu6U6ZL6
-3mXGpLkQYQpKEtrQ24V8IcdAUM62wWV9Z1fcke0bwNDqTUsw1tV5hzN1b4iq2lcK
-TlCgdT2hJEQnffZOkioV6fwILUmqz67Co5rN3VBPzUehxExaRSYrSw4IkNtP3UZT
-lRHKieSeSbEdIaci4fiC6dBDLaUwq69XlJByhpqnRXXxwO3mQ2PzGi4egvPtOL0J
-UAByAIAFRLjW5as7W6sbdRKEt7nUq7O5PMnU+JNdkqU6HN4UnnwCnPcgYOQfqNNy
-leMCVYDjKtuexVjODn6451Y7JPWLSO8/IVmFwaClVMc3PBWcbgRxwCccjPodMs5R
-Q6tZO5tSjkjy5/L310wfv1jyoKhKRSeIhJkIVypJPlVjg40O1RSC44heU4XnkYx9
-f76O0T1y9eVCcHYH1ypiK3MnGCM8HHfWaUSeVJYTzqoXxE/EQLbiHpl07mR6j1Yl
-xQuUphIdYseG6lIM2UR5A+QoFhgncSQpQCRzH3QnphRbEpD8uW6ubVJEpVWq1cnP
-GVUq8++n5guvuq8ylKKzk6ppwKVLivwp0Hr3Py9XjV2YHbBodeoaq28tPjQV1Wej
-1+vxIVDVNRVqlMTTKS1RZKoNXqjy/usg9ilIBUpagQhCVEkAagO4bY+JqxbEqNlM
-1a++mVPsmvVSqVOpUSm1KqUK56dOkPympzdwwUKUtmOy4UrbX4RSfMpJ+6mpOM+G
-m8ftrV+5szcsMvhakABQHYWkKKT+IJzhah4d50MuucatGXmrO8IgawY22GviZgeF
-QlTeiHXuQqLcQqFcqkWoPIVKuUyKq+6Iq9mZanHUFSg0hYcKXFDGMnaASPUp0h6M
-dNba6fUSCxSVVJEC322nJ761ipTNrY3OuO5ytbmFLWtWeVccAalvRS3wVxtY37Fu
-WL23aKG1NlIWhEhWhQoDITGkAbGDUY49xB1lNp9kBbnMZGkkZY1B2EnSuUX7NfpF
-02+I3rN8YfxrxrmuBFYvDrNV+mHTun12I0/VrZtuG3AJdL48viyUIhsYjkBtiGlG
-SVq04/H71Z6pfDB1Is6JbNzPOwrroj1RpyloQ8IaoLoblMJbKcKacDkZ1JI3edQJ
-PGm7pU6COjG7YRjl5h3Wv27TLDYW4spSw2lLSEJTmAGUDNP4ioqUTJpu4Kxe/vcS
-Thri4Q5mUYEdoysknx28o5VFPTD9pjXroqtAs/qpbsWVDrj7VHF5UGlvwp1NdfUl
-tp2VFShbS2k70lfhJSoA5wQCNV4tXqXEq993i14LcMt3jUoBjqa8JKW0TXkJGznA
-WnCx6ELHPOqF6Juj9vgPpBu7nDVn7G4ykpCjKhLhzJPMJgZVEAwqDJBNW01aoYcV
-bqVmCkn1EbfXfRbd/wBpNT4qluh6mSCV0ibs3IZWoJPy7yhxtICincOQTjODoppV
-qVGtWjLUuZTadNhH5qmwkR0qiTGyklQSvnncAMcA5I762dadY5mLJjSNfL4eNLML
-urZhtLVygkSFRyIifd3aVBlMZqorrjT60tSIrym5TkMeFHispzkkZwFHO0c88nVr
-PhPuqjUzqtddeq8GqSItHtQU+FU4scSIdPXMkoDqVEkFTqm2MBKOQkqOMEZiuM8V
-4Xwa0zi2MKIYQ4kLyiSAVJTIH5sqiCQNSAY1qO8TWdxjVrdqtQMywQmTAOo05CRI
-FdQ6D1Z6cvOoP730GP4xP/t6jOTTZTZGQQpp3apJBBHPGntu5qBNdUIVdokveMpM
-erx3i4PTsv01YmC8Y8K40k3GFYiy4lcEQ4kHUDdJIUPIiaz9eYViVoeruGFpI/lM
-e0TT3TndshLiCFNqUBkcoX+fqfrpvnueE+6pICkKWd7Z8yDqTNGXlwdOzTWoEJA+
-u6muOvZIbW0rhStxbWdw78kf99NFR8zr6P50uHuDzzkEaK0ZeUPAUJwdgHz+VMpU
-gEggd/6sazSwDT9qSwnw9tcsujfRluiRHJE9cmXV6pJTOqVQrDvzU2syFK8Rx2Q6
-rzLWVAkkk9uxAAEq3pcUW1Ke8pK1pLjASlbqwhmOhGVBByrsMqWR2wdUriLgt7Yp
-netD2TIUsZRWvo5Ci2309vr4obnpsqpNUq2p02y6aQG5gpbKC49MZ3A7HJziEoSs
-jysJTx51ZrNC+Ief1YqUG3+p0+opYrkyHV41uUuUaXadOZmJQ7DQ003tXIQ2opBe
-fWsqUkrITjamP4pfLsrFvDdgtMr/AKoAHluDzAApss7ZGLYvcXZghswnl2e/x5jz
-Jq2nTmZbVkTajSbZjNiU94jq6UFL+WkJS+0HHMjyBaVLUUqA5AIUMKUNSv8AFf1w
-d6B/Cb1svW2rrlWxVKbaztLt6konJdhuzKuPsyK1EaV52yhyWl3DJCQGhwMnWXcW
-xbiXgPp0wZzgi7DCsaCGLhBCVIXkW2gKCVCOsAcTkMSCFbhSgZBiNla4hgrv+IIz
-BrtJ5jTmO4xrQl+zfoI6F/CD0cpEWe0m4Lpt1zqNXo8pouJMuuLVOSlTiVJUPDim
-ClYJUSpAASDyQj4qL3hdSrktyTTKRSrgq8CC6xUa/Ijht23St1tpMdsvqGAlsyJB
-S3uy40kKyUpAl+G9MfSB0j8TrwBeHlOEW93clV0gKh5tCnOoZXIyDKS2V5CVKyJk
-JBVLNheBWGGKTdJcPW5EykHYkCSO/XUctTVQKZcFt20E0oUmPeVdmSnESZaCZXyr
-bxJ8HKwHiVJAQVlCd+0nABCRKNN6F2reKGq/TKki0LiceUzGnxGHZ0B5LR8NDUxv
-A8RHlwl9tRVtA5PfSfpM46VwTbW2L2aAtxLwQpJIBW0UKK0pPMFKCDGigJEE1P8A
-AbNZfK3DAUDpykiJ99SKOnF/0anuw6zQaNdlK8LY5JoVXZqDTyR2JjuFDySO4BSS
-PQnQgbNo5ATEtq/Yq1L3LpseLNbYzxkI8RJCBnBG1WOfz1IOHPSA6PcZw4XFzei3
-cSBmQ7LSxtoNwsf8SakaMHuWnCpv83fodDz5effS2J0hrtSaaYW1B6eUWQslUmqH
-7Tr00qxuWmO3kbyOynnBgkeU9tTLTKXafT6jtW/bdOq0xxkEuVOcyWFSXV43ugrA
-SVqOfORxxjOANZu6YemAcdXreFcOnLh7MnrFdnrFDSUpPayjukZjvA0oa7Ni1RLy
-syp2Hz+udBkmhs1Zx6THQmRIQT8xS3uJiU9z4YPJI745zzgnXxFs5Lny7yUJSzNJ
-TFdSNyFrGSWie6V4H3T6g6qFniK+tQEpcMp09Y2I8CNfUTSJdky9JI0P7T7KkCiu
-XnZiG6pbVw1OG224EOx0yVPRAochLjKsoUleCORwRjVqbP6lN3O1Gi1VTMGuSYgm
-MpbOyJWG8lK3I4JyFoUClbOSUkZBKSNbE9GjppvjxE1wXxA+VW9wClkqM5HRGVIJ
-/K5qMv8AFljQmqr4+4UZ+xKxSzRDiNVR3pO5PinfyB8KO2HNshGCOVZ5GACcH++t
-FT2tzHzuI8yiRnhQ9Dr0Ca/31eQqk3B2AR4/KmvxG/c/8I1mluUnupMCAImqhzpS
-KFBchJTjw2lNrWVZXhIyDxnzAnv3wdUnu6rQupHUKBZFTmyWrTiYrV9SoTLkmain
-sLATCbbbSVKemrIYQ2kb1J8UgYSSM9cQYjaW77ab9xLbKSCtSiAAkamT47Ad5IA1
-gVpJDbrdi8+yCXAnsgczoPfHvq5l13dXrptZdi2JbtJRZNbtl6iVn954rkF75dxt
-UdUOKyhW1kIawPEcyQpSAEeUk80upHRWrWNAoDdNflTKlbVuLl29UJDO2o1mksyX
-lT4D6EgJVNpLhS4dn+LGkggeU4pG56VbPijjUYLZBP2NSSGlkELUoJCzIJgTCsoj
-aO813w/gS8KtTcPE9bPbG6QNtPIameVWt6LzxeES27yhzJDFTdebjyoMpTUmG/Ij
-rC8q2p3IQ8pPiBvcNxSQeFY0k+OToX1T+JSyLAsugVekwrZT1Ph1rqG5VJao70em
-sRH2kvREhOX1hbrhLHBK1NqBASrFLcecRJ4c6bcE4p4jSVWlgnrGEISJJKVKSokm
-CeuSEk6AISDEiaf28KF/hFzaNKAWuUknbcT/ANfjXUr4VqFT2Z79svxhLYXZdIVZ
-0aa6lTNvMU9uRHlxmlKPkbVmO6cZyc+m0CnXxzfCmLm62Rq/09n/AC02t240byp9
-KOGm5zTikCR5lobR8w0WyorI8zSl/wA3OgeGMfwTD/RmwjiC/cDTbYklIlSnFPuJ
-VCRutZJURzknQaQ1tDlv0j3LLKJSobdwGRJHqke+kHSj4ZenHTlTFWuhxN13SpnC
-qdBdVXfB3HKkOup2ox6EHYjvkqHe08K+7VTGDKaYYaIGWW4eY7UaOlHBKlJ8iUge
-gyeONYK6Q+Nbni/G+vKSm3RKWkEyUgwSVdxWvdXkEjspBq1LVrq0SsyrSf28B+/f
-TtD6gWdISXVLQstIK22UwlOeOfQA4Gc+5wNNiOplObS67NgUtDOSWVRnU+M2PZbf
-PP1B1XRJKUrQdJ1HspwCylOU0zTepNs1NtyPVJEbwHOEslaVpbHsRgjOPQfrqP6v
-RKRWYz/7n1oOOpb3/ZMpfjw3fYJB8yD7FP6ac2H0AhKx2e/3ajx5ew033AUBIqLE
-swvmTGqEeVbVciKAblbVuxhz910HzBJIyFDI+uieE94LrseqRgUywFVCPHVuamgY
-KZ8FwceKjAUpA8xxnGe6l0KBCZlSdjzToR6x/b5UJpYAk7HfwP7/ABoshlpucKbU
-Fsvw61EAanII+XnpP3H0egUeAtP8qh9Rpmq9Clt0WqwGVuNVi1J4rlIlNkpWtl0e
-faRyAopzx2VpbguJP4biVvdsKKVIcSoEGCJ0kHuMx664uWE3LC2liQQQeVSp0t6k
-JumMmnVd1LdegtgrWFAKqLWAQ4keqwMFQHceYfzBMq1VZVJdSlQBB3JBOcggHKfx
-z217P9FvGTfHPCNlxBmBdWgJdHJ1Gi9O7Me2AfyqFZV4iwtWD4m7Zx2QZT4pMEez
-Y+INMRmEEgpOQcHkazVjyPo/vUdg8q553dWq5cFSTQLZIcq9QUppLshBVAp6FeRT
-7oHdKStAAHKlKCR3yJKsmwrct2VWreixY4DMyJU3q4tITOqrjrJjGRId7rcDyHQg
-HgBSUjAGvMz0geMFPYozw/aOEJSM7kHcyOrSfLU+tJ7q2BhtsWrQFQ1MH4Ue0ENI
-o9uynWEeJVJaoc5ITtSt8tuBLmPqqPjA45OmDqbZzV105QZcFPnRwi5rfrAQVilz
-EpdZccwOVIJQUPN9nGn3UkebWarLiG8scbt8Rtz94y4lQ80qSojyOx8Kc0W6AwpK
-hoRr5QR8Kkj4YvglodIS31OuCuVJyHdFBjVGFYNOdLVEo4eQiQttyUrzrCHFAtob
-SgjCcrVoov6ixoD86nU5Ti46a0uJFU4oLcGGXlAHHsfLnWw/Sl4Fw/CeB8IxxztX
-pf6sq1/A40teSO/IUCD/ADK51XHBvEdxiuN3TATDKET/AFJUkZvXO3gKZqZMqrf2
-fPp1TXT0MR0CHMpiPlqtDG3a8wHc7eSFJUlaTnP4aSzb0t2JKeXLpEifNdXvlza2
-tx96Qv1WtSshR+p9uPTGN/8ANONL4PtuF27lZw1CutS0SMocIAJ2nnAkiSSBJNT5
-Vowb9d1kHXHTNGsbxQ1Xr4aqcZcZqZR7VpG0hxciWzTVT8DlG4qSEo/qIPPb31Fs
-2t0BHyk2t3XZkKBJC00xRumDApVRDBCXAyouhLpQSkK2lW0kA4zpiawrEr9KXLK2
-ccBJAyIWqVRMDKDJABMbgAmNJrpd0wyoocWBt3gaaVomXj07prIqyrzsduGClubv
-uqAGW93CcEPZyTjAHJ0yTOrHTOeG5D3Uez5TKFqZZZTcUbwkbAkqCWUq9AtGSf6x
-k8jS234O4oebLrOGvlIMT1LgAMAkapHdrQlYrZhUF5P/AOh4eNam7/6cVKQxDi3r
-aDciWssx4rdYjIkyyhC3FBO5QyQhC1YH8qCcYB03VDqj0Zt+VDTK6l2zSKjKiN1K
-Ef3qhxfmY7o3NPsnxxubcHmSseUjkEjR2uC+MH3haMYW+pZTmgMrUYGkiEnSQfZX
-DuJ2CEdYp5IA8QOXMipfp15US8KJEk1Fuo1yjvIKqTeNLjtT07QSk7ZLKlIcTkYP
-ftzrQflae2GC43U6A+oFMiMVITHUOEOo9WHk+ueDjg+mmJbNyw6bW5GV1BIg6EKB
-1SQQCCDIIIncRrRULQ4jrEGUnv8AZqPCneAou4t6oyUrZnL+dtqtI/hobfznCscJ
-KiMLSOCcnUiw5SqtTjOkthit0PdRa9HKf8VpeNrh90hQQ4M+iVe+krkBYUOYj4j2
-KBFKrY9rKr6+hVepyf3euJ95nx2kwHkyMR1lDzkVxXioU2oYIW1uWEqHILfsSNWl
-t66XayyzGqExmbJVGS9TqsykMpqrRSFIUpPYL2kE4wFYPAIOtm+jF0kP8N8aN8L3
-ipscQIRB/I+BLah/znqlDv7J/LVW9IOBtX2ErvkCHrfXzQfxA+X4h6x30RmRJyf4
-eee+O+s16WZh/DVCBKoqmPTOlPNS3J9UYRHr8qQuVOY3h0x/lHNsaKFdiGxIQ4rH
-dZPsNH9OitzqrVKbJcU2moWtHZQtCih5Doky1oUhXotKhuT9U/jrxO4wx5XEXEt7
-iU6LUop8EgkIHqSAPVW1S0GW0gch8q1RX5s6xZUhtYFat4yXkoQPDQ5JgyFpeIT7
-rRuWB/nOnlyaKnTXZTKyqPNp6/kmjhSAJ7kZxG317uPDjtt1CEZk4iDP/kA9Skz8
-qG84AwY5H3bfOpppnVW7qJZVAtODKgxYltqESHUmYWKlUGG0eChEgrJQQ22AMpCS
-ogE88aCK7WVSE0QOKU4+uppkTHnFblbpDchLe4+qlklX5fhq8Okvpe4l6VvsLGLp
-S1bWw7LaJMr6tKFLUTqSozlGyAopEzNQ/A8EsMCS8q1krc1JPIqmB4D3wCab4suS
-2l1MYtKCnd2x0ZQVDvkep47/AF0H9XLtFudK+pFxVZqNEVQLEq1WYlIQUstvNQH1
-MEE8lXieGB7kgDVTcNWYuHrKxKZ6x1CI/wCSwB8aer94oS66NCEz7BXAyrUiqSqW
-qpsVVqKmhvwen8CsVSPFr0yTVqXRLfixKE0xK8XHz9Rq9RkOhCMrTFWpRw2Rq0PR
-gQKRanWKvuT4lrUui2He9et+usW43V2LPcrt4qpECVBpiRg4/dzehpkDJWAMDW3e
-Mi2eG0NWbRUS6y0UZ1IStSilASkSEIBzlPWJSJzrJJ1qr8MUr7aS4rSCoGASANd9
-ztMHkI7qeots3a7X6hSW6fdbdcVbjEKr3Q7Z8yb1BsqKw4Ju2nQ5U1ElRrLrvhSF
-xvCkNx28sOICAkuT9x02wY1zX5UKvNj0GqWfX+harqtSqXBUru6aV0MKra1yaJXn
-FS2pHhwWwlEaUWgsNqVjdvFeOC1xVaMPsENuLcCQNWVtjMAW2VfdJcbt3Utkpdty
-pvrLhwOIIANPCVKYCnHVEAE8wdNCoakFSSYIVBhIgjatdXsC/Ylt1et1yxblq1Ao
-fTu4q6/X75m1JiTR5Ldsz0QJ0Nhdy1BtyQ66/tKTHAQl5ZCk9tUeqanaNT6g5TLm
-pliJoliWTdMisqlwYdbehs9PXlU1qKy+N8ph6qhplxpgLyuQ2VoITlM44AxLDMVb
-uU4d/qmQEpU00n7vrOubytpDVvbEoJWhS1dV2ZUVKKUEhqxZl9goLhyK3zE6xlMk
-ypWuhAE66aSa6ufA/VoqKB1ZYXLdsypMdWzPRS4kFEe34X2lQKJPcaMMAMNocdee
-dLaUpALhxjV7JFxs0k+LWbXpsiHJQUSKrRSlynSweCVo5LZPsokf5tZF6SCn/POI
-EdokoVJ1KszbasxO5Kpme8mTvNT/AAbs4Yzm0G3lBiPVtTRNtuhV6O4qzZ7bT5QJ
-yremn5YKI58SOedigf5kEpPqNfdt1hf2ir7Qach1P5cUO4oUtIZccBOIshQHcEko
-Kxwd451BSEXCM7e/fznQgn1iP/tPCCW3Ae4x9eygW9KK41KZmFCiabLXDk7Blx+K
-VJdTgdidjivzQdEdPbbipegwnSwilpRMppRwRFdO5soHc7FhQ/Aj31KsJxJ3DHWM
-WtVQ6wttxJ5FCgQffPq8KRXzCH1qt3PwOBST/UPlRQm/IyUpTIplRMhICXyyApkr
-H3th9s5x9NZr1GtvSL4Dctm3HXVBRSCRA0JGorOrnBGNpWpKU6AnvqG4koQa7DmF
-P8Go3PMpb7yTlLLj7aCxk/0uABP+8hGn+qB1l25ahHQrfAZps6MEnzlLJddewPqH
-HQQPrrykcUEvNqP5kifURPxrWL0FuORNI7LqzD0u8EpUlcVF1mUEp8yFMVCMytX6
-lS/1OlVjtLkxbZpwVhMJyWXdx+9HiSXmYpP4qP8A+WulNEXJUP5Vf9TTJcOQ3PhH
-wo2uKrRYzchAcbU1Fa2hAAT4iz3HA/DOhiqyVyqUtMJuV87BkNV0Lfb8NMxcYhag
-n2GxJSkegHvnTtZtFthDzuicw9cxp7aagvMSkGn1laUuNSFFbUSW2mQxM4U0gKAU
-Ar24I76DettrWle3R3qLbN33ZKt+2KpbLqa5cMVCVuUiO0tEgujeNiyVMpT4ZPn3
-7RyRrrh64ucKxuzfsWwt9q4bUhJ2WoOJUlJjWFEAaa686LeJQ/brS4qEqQQTy0g+
-zeubdJ6SdMpTNt3qrrxfsW4rjhCcisQegluT+oEBSZNRp6UyKiyh3wJhdo89AdS4
-XT8sVFzA3CTqbZPQhmHLsa1r16pTLJftKz7NqVPpltxajLu+RHq0eoQYBcejB1t6
-RIuCO8/scQF/NuJSMMrI0djfEPGN2osJwq1ZbaUkyq6fc7TDqeo1WsKbbbVmWhAG
-VSSrMmTFQ+1s8PQnOXVqUr+RI0UO1sIJOgJ5gRT/AHranRu9bsv+8psyvUGtT1SK
-5eCKjb9rXDSYjdFkTLfjvMfazBebW4qjPtqZgrcQhSApzAUFlNP6SdLLg6c9NarW
-bm6kiyrivWZ02TFlv2/YdFtcTjUIdWfXBjwvlGitVNdZXKQfHWiR5ZCQohUbaxnj
-HC7WzbW0wtTBS02pRuV6pt1BGRkuhnOW0ZVKbEyuIClLAVG2sXlrSCoBUqIGUbqE
-yqJiTInl5VYGt9dLEmyajYke2avXKd9vU6xK887IiQrZXAqsp+ivP/NuKKVNtONh
-txDmzKH0KCwDnVPqX0j6AqbttMFrrTbDUKC2uBNRdUWsWzZq3hX34kOLGqTUiQ00
-P3fmJOxJQ0p1nOfEGmrhay4n4TsLhqxvLY9bkWtp1tSxmKFQlC0lK0uFK8isq0gA
-wokZTSi8ctbx1CnG1dnQKSY7xMjUEAiRI7tKsz8MVjohWvfN0WvUrvu83j1Ck1G7
-Jl91CJULnfqDEWM0HErbbSy6w4wthaFDYrCsEJ27RYtFQfo7ilPUtDDTnlkQ32FQ
-C6D67FZbJ+qVaqjjY3txxbeXN4htLuYJKWgUtgISlCQhJJITlSmATI76esO6tOHo
-Q2TESJ1OpkzzMzS6OikVYoftx9dNqcZQksQA4lC2XP5lRljgZz5kfdUCcgd9KZjB
-uVp1D7gh3VSWiGZ/h+CqQ0s4U28nvtyQlSeySULT64ixSGng+NAICh4GBPqOtLkn
-M31c77edbZDjVUi0xU8hl+cBBnsEZLMmI4lTiT/vJD34hSdD9WlCBVaXUlJAYXNd
-pUoABI8F4eIj/hKQR+Gnm0azrVak6ZVD2aVzdLHUofG8j5UpXTZe9fhhCm9x2KHm
-Chng5z7azSIYg+kZY2r8yW51IqJEKULdyFKyZ0dwnPO4CKQr8Qec6k+cABV8ADNO
-UDx3wt7GdCeJBajx/wDWn24JzHz/AEqE+li1/O3inerbspx27jjPgjnH5D9NSb0/
-UrwHjuOU0qMEnPKQVSlED8SST9TnS64JDjkfwI+FMV1/tjz+QpEkl2uqS6S6kSNw
-S4d6c5Xzg/gP00WUNSnHq+FqKw1GUWws7g3ltwHb7flp1uSfs4HdCf7hTWwTE+f9
-tE9GANOo7ZAKCwnKDyjhCccadqxAg1CPFgz4USbCXUo7q4cuMiTFWpp1DrSi2oFJ
-KFoQtJxwpIIwQDqO3jzzN4lxpZSoSQQSCCNiCO8c6WM9pISraB8q/Y9q2uh3ci26
-AhTry5DpTR46S44pa1KWrycqJkySVHkmQ7/WrJGm27dYdMtig0VmUUBBktUthuQR
-G3fLjeE5w14bewZ8vhpxjA02N4pibj46y4cMyDK1GRy320FKw02EkBI35eVOTdGo
-6kuLVSqapbXDSzBaKmw+Nz4SdvHiFSivH3iTnOtkOLFIXGMZgxkKOyOWU+Ajj0Rj
-A0uS++sQtZMcyfP4kmkZADsCktQjRvCQPl2MOABY8JOFjBHPHPYfoNBdbjsJQVJZ
-aSoEkFLYBBB4PbSBZJGtK2vw0IQnnoklluK65GR4hGyOsso9R2GNTPFAfp7hfAeI
-CcF3+IRnv319aOLURmUfbQ3AANPD4Co2u+HEYgSX2Ysdl9l5xTLzTCW3WiNhBSoD
-IPJ7e+kqVrXMtZ9alLffiyEPvLUVOvAMnAWruR9DpZek9So/yq/toVsTI17xTTWl
-KEqIAogG5IKiAcAlUUhRP44GffGmq6CTAj5JP+0WDyc/ypGnXCyTfon+H9K/bkn7
-CrzpwiPvCJFw87/8dH/2H+kazQl/iNDSTA1r/9mIYAQTEQIAIAUCRIuuhQIbAwYL
-CQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEFI0hF3yuSD1DYoAn26QnS4XjcEQfUej
-03MlZCcaSdL1AKDb9ZBnsahW3KGVmhYRA5VU5X4SwohGBBARAgAGBQJHHNOyAAoJ
-EMckhjdNIInbsl8An27QwLkqHt9ciPK2+6vTkYshmuFTAJ9P/6V9og6jphPPKsCj
-8NncsMiMvohGBBARAgAGBQJHHNP9AAoJEHadk3JoxR84LW8AnRIDpf7CgXgw3uxO
-DujWkrLj4nE0AJ9QxjA3+Kl/XmLnoUURXvkLFCEtkIhGBBARAgAGBQJHHOmdAAoJ
-ENoowjp5/0R0IS0AnjoxElXZvqnIEVhz7hMAju+nJSP2AKCaqA3TbZPQ4w8HsqGY
-ifrx6EDYrYhGBBARAgAGBQJHIeYTAAoJED/eqnpP6wOhRroAnjnypB+yVgnoUPcH
-j3nvifvC9Cc9AJ4zlbTXI0hLE34t2a1yR1ZXR+0ayYhGBBMRAgAGBQJHIetpAAoJ
-EE+rSctElUDyr8UAn2kL/7oRveLF8+im8hPNs2egVOhcAKC+P0hmr9USd/8vRkeh
-r2RI3yHnLLQkQWxleGFuZHJlIE9saXZhIDxseG9saXZhMkBnbWFpbC5jb20+iGAE
-ExECACAFAkUSMYgCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRBSNIRd8rkg
-9Y/6AKDy4ZrF38Y5nx5RUp2ixb06dpIHdQCgj4yficR7p3/iGr1kDlgMOj3+gpi0
-NkFsZXhhbmRyZSBPbGl2YSAoZm9yIEJjYyBzZWxmIG9ubHkpIDxhb2xpdmFAbG9j
-YWxob3N0PohgBBMRAgAgBQJF9MeYAhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AA
-CgkQUjSEXfK5IPVlTwCeI+QnjS+TJ6ZuhbLvWVYQv2dh6vgAoLAMjgWMoeSrcRuV
-ik8NvzXG2h5auQENBDdxLdkQBADKUKFem3lLTibOuS5ZUeZ2WZn8uJn06KUwEy12
-6MebajTCKeGLGbxLn44sxD8hOvbIN8Qong4XUy6JXP9kABwgQ8CwhC/cY9dQCykU
-gsbkZD00SYGY8uNJ4+AkfLbBC2gdUEgQgcMKe5Zdf0HJMXc4G8I7AVjOv2kjK19m
-AGv7cwACAgQAlDaviOjHim8qjbzAsxHy/aESIL6+Ddv7atMqWK1e7bOH0Uz1fp1P
-G1OvBSB0C/oEq2PjRuno59mbAcoQRc8sQEKmVW5EQMxlgYTiA+KU+Upt3lCPSoLF
-DcutboWEmY3rQ82DaGp0HaEuPMLhC49cMxnS1B26XMxcFDi2mShdohyIPwMFGDdx
-LdlSNIRd8rkg9RECjMwAoKhmGZLIMKYY1WjGsS37zcnuCpLMAKD/B0XjJbUcVc8w
-2a43skbMtU2j4JkBogQ54qw5EQQAwky/4lE3mSm9LYgHCnWXbFD/ykp6xho5Yp6J
-VALFAtA+v4VAdUXQqSTjhEUyBK9cr3oagzO73kYTTKMSTUMWv7Eq5MsHsuanPLmr
-JFDRKnp4pqv+9G2oViXqFl42nevJkBYeuZVC5x0K64qHiZ4fVKchiaaTW/8iI3Z8
-ph/WmBMAoJYD+1HoAcx9tofDVUBCtj1lm8lFA/40AOJSNMEVnC/R6ZGJB3B9C0yk
-Qfc2KUGLcr8yYskxsCchTH2DV8tEU9dXSAni5H9xzEfOIZDxESjIQC6Gy8VVG33O
-49gMoJsxqYVhsgx/8Y6bUD/867KvO8pWETjStfMNSeNnJvsmWuF04TAzyTK2/f5f
-Kp8VNSZ1u1tWpRyk6QQAm7PvdSi6q5yBnBeOEzMMYFI+QlK2sj6BJy1PmTAan0o5
-rvEahGJk8SQir+3qwRUrmswRo+V+7cLp67a1t28TutzGy2x4310kazJyjm/HUVCy
-IOnlgJ6AMfFVWWI6zqcAJ7VN1sWDqW4gh7fZC22/jea7zbRXJjqkUUGgiFBTjmi0
-PExpbnV4IEtlcm5lbCBBcmNoaXZlcyBWZXJpZmljYXRpb24gS2V5IDxmdHBhZG1p
-bkBrZXJuZWwub3JnPokAlQIFEDti5lp30qslsMhxPQEB/bcD/0mu4UfOi3O/cOgf
-yeQ9Xl2mUEGoO4h9Fyz4JxDsko673e087X7HrFnCBVI8wfzW7qKIj0cgahmtdryE
-nr7hkGAj9w1Fg05ZTXWZH/9lpdRTgngEil2EDvHo2h51WUIpjh7/3/ZOQTT0iC7K
-341h6VjxC0DRH6IFd35QQSTunf1JiQEVAwUQOeKwNWx5eAAqlgcFAQEdZgf/Vn2d
-MKrn8021NhavP0uA3pHGRmdKQ2WJBdLiN2tvLkpAioZtho+op+xBz8j1zdIJQ/7X
-Wko869KHge2BAFwA8rWDzjtaAWdE0Jo/NiARepUwV2FdRRwSxIcNG2CCPyJnfPok
-Rqjdl2z9k2PkwidHSq+2k6JxCWnOcIXChSKfkHnemtA65ixAlhuxvyN3MPuYs1jA
-HyDGcyMfomp1qH9tXFQhhyXRrG2eMAfslstCXGXLcoLN3O2BMR/fG2GlV6kOqGOv
-oMIW3clVeQLQ9B1yyekKiVY6Vg+CgK5qhg8z9tjH4f33zzNDwsx1WSCOU/1LIPzF
-BNbR9QtTF2XmOUfRs4kAlQMFEDtfZyF+MVCmdjvpAQEBtt4D/iShO9KALAxdFSxh
-RVfnWQmTZ9jfsHj9AkWYzxmVhPTjbJgOM5B5Zsn1nvXiBUXH1k1lH+wpHbt4aKih
-fv2bxemJYInYmMNbC84VkU8iYz9yD2FmFck7wR/5PTD6LDki2mJ5QAwdqHw+qanM
-WZnxFs48rVPgeJn6uldFAyZ8epANiQCVAwUQO8hzMLsMy2KDaNINAQGnegQAjZb0
-4oyD2qDKdvBLzaZLiY4tU765ERfaFbXn0ZnValtj+MmqX3x4oFVnqjb60NUFi8U5
-KB/nm6FcDxM12kzFZx3mFFEpR+WT/3fhVVuwkZ+kx9zS4ZUO88sKThElL5FRhlY1
-RqeHmnriIUJX5ZD0y0nxXbE0aSWv6ir3BTOCBxCJARwEEAEBAAYFAjoL1bgACgkQ
-HgbN99DDsiHq7gf/eTITRtHebCbGoH9pQZE1gppa0fmJJihXLQWFnbGy1sz28HtH
-Yl7DDQMXsg3hKYrXaK3BDDq5K04DpCQUqxKpe1UAuQzwxRCmWTPYnt1iPb5w9TsR
-eVCaywy/FLVPHU77RqHwcDo6N6pFUAtYPdaOA8r08nPqcYbrB6xux2Ky9FGjd6r4
-9XhfrgVZfQbg+h2Oki+fLZx1Sxs9sWRbifLbYPBqx9a/gHuUchJbm4zKbRw5L+rx
-tbSNYjPpL5VcsBA6lVmhz9wPwDc0eJ+m1BBjqL8XI4TVAEgTDr/AXmSWi4g317iE
-J0bzOSOTSvlFV2zqLcvBBgZu5FIAlEj3U1A7NokCHAQQAQIABgUCO77anQAKCRCh
-AEl+AbRc4hTFEACWIB6H2922hws7il8M6jvZoT99mk0r8KRdKK060eGIRjS3btW+
-+KxTsBQ9wryGy/Z8WVh3EG8H+ahtzwbuktwyMVOdzVqawf3Ppex1BAZw1VjpyUCp
-ML6Q5kVamggOSZ/kKl3H7FDSRDaLB03TJZMEoilhDzxcdB1w+SyW6PpZvVz9h3ir
-uBDbDgctgUHJdUzLuwKQQ8UrnBsRAc95FBXCQjq/3QMx0IP6T3ejNwt/O3rQUkht
-IfsykwHUcZaLQ4qIQw+vOoWPwPYg1G/ygXn4esph7SEKks+k296oZGDMxt9fW9oo
-Cmx/641dXL38Adh0gxAicHESmqxApjCbdfZ7IRjKQuemBgY86pktRcwXnRk+52iD
-ZGbkRzUumE5ZUNDmEx6WISMULdAoh1sc9IjkDW4PB7UK8qfofjwyvPWJKvXwccYK
-c1vhDeHt3zzij9W2yRPIJlmi/7iJ+ThxqALZvfijd+fmJWUKDNSq2afKRt8f1DFV
-7LKdimQNHrkhRIwx5hiBSfTsxSWnbMHeXWCMxZD/uG8liG4qWkQ+azDGKmNHnC02
-jpnrAOLorKBaZbgHMKxzgRtYoNeM6gnmQ9vGWGsUIp5mnNHx44Chgb3UBfEBjSMs
-DkuVUyfldOjJz3sRSxAFN3HKpAdfDDa3NDG6G1XNJ4gcWTjd0BFwqAmATokBHAQQ
-AQIABgUCO8ayJQAKCRBCtwuQzgi9kXKRB/4o2OBqqGpLuY7d8roEFcAkvS4cVVBw
-6NqyyES0/To0ORsIHa+ZOOe4++b3DTVSLQ4/kxysbQjAOsgBVST3NYHV3NHiVm42
-eeuZrwEwBVK7MHfARt4pOzfDVhqctlgElVVl9xpdkG9l2kFNvj/CNdRFq/74e7Wt
-JJ9OXcxLRx1nkUAkBU4o3J7R3pvW/svP5ZTqbtmBcZazmm1HynWruF/MQ+7yJydM
-WN4pDT3ToefKMMd/PuJlKPcGZ9nHZUuf/RU3UkNpPd9LFQIh599qVUH8kcUXQ5mb
-vZmp5b9zCvmXWJdS+3ldVAwsxY2HpnMirD8UaZwM+fHEbIFh0n3MfSLRiQIcBBAB
-AgAGBQI8Kxh7AAoJELEmyajfoR0VLmcP/3faYEuha7qlozIVza0w4twysEBzfA9T
-bOGicL5Sp7+JeMEAqj6Q4AiI5NnVt95k5cBzj4uoHjhVzJYwcSNouUFFUBOAJtGh
-m7DPkOrZ2Jw5WK8VG5WD8fKIjI6ssIAqj3DE23a7yMBij/S0LE3wN2rXg9E4+DwE
-ix0mj1M4YAMw7AvAYxKHTGWYUP0jP2VojvuJ6Joxj6xMwY4iT3esL5UqPImihnmH
-I7HiRIWLus0o6HzkmNmGRpwIN4qis2rak+L+Psw70RCo+EzcwIeUM0zQlxVZivfO
-tOxVG14ZkrF1GBit99K3KjFRCpTecQ+OKW6uLO8iFeILiVwdGejuYmRwPFAHwAkx
-dYkWR9BVV3ui6BBTz97NrG+f8Rj07yHW+7x07ocr3cYSqmbFpEXGNBKURH1r8L7w
-YH5TTowPl32qiJ7jOblFd2IIEl8N5PbCoK4UNG2ODud8O/DMVtxKFGyY4+JJnG7p
-Ht6y1NQT16LQZXzqWI3ydG4tfTTfRTUdWq4LnhstDU4fmUGbAjAo4gZ/hI+Toj9/
-inpkjQ4vs7CMlTvrcLncHSzFW3poqBFdL+EJcX0DZUJS6wEzreg+CLWSE2HDhXrC
-UzG1K4LM9QQWm6jH1pax95tNpN5mOFURygUr0ZTNbmiAmlLjDkJMEW0isXQOrcym
-LTZIpbKC/EGmiQEUBBABAgAGBQI9oe3RAAoJEI8u8DzpeueD0dAHv01m7F4a5Y1K
-XjGIZ8mBAfa+i1A8DRCKLYHQp0WBKx/ubo8z6JpeKWDRBKItgwbmvqfrt1sDyW2+
-4nTxv/P1xNe9OkeDbLdDBHBD1LVGQea6mGXw76t+KvD+w/ITdvooZeaE9h4lM55/
-pHar9nU4eV4gPCFUQmVBXOXuwz2wwlEkGYBoNja4Buk6xBt50P9UZK6zqQBCQ7bw
-gPonkEvdmENR9GCDI38euIkIPOCSoELdt0gyxR/Rvqv8uXuM1bI+UGHgHOvX+AWa
-CQibpJcssr1VVR2v4kn86ka8bzchWYHw4+7PCZoLqgXfyAJer+C272d6CVyF+v8P
-iQIcBBABAgAGBQJAG/hRAAoJECNCZCZrkyM/C/EP/iMjzTC+KVYe+t95ky70EFfN
-aS6qRTfav9y4Ct7m2eYxyZ2JVhEeMoE1Y/ZfPG1WWcD+olSxrUbiTMxVb8RdlvPz
-K4tWLRj+UECpIoIYpMN0QoObr8iX8VnVrMmgg2+WeGI45q3hwrtqdfboe8FkRswZ
-bOaIVeX9ZaH8f4z+VLkQRE3JorgS978osYv7SoJDUoDn0mWOTLJ491iJO3QLa0BZ
-47ZThNFxWkOtGEDXTp/YXMABvt/QyNUMWnc27Gel56MJp9okB3V2NfoiYhAqaOsL
-UtSS64ttSlRJ1qfyFPu8vnJt35KurJB1B8KpQv7YhGRT3LeX0vuHbRT6+0qu/hwI
-ljcbBmgVAiJiTevIRT1NiY1JtLfWgj793buxHM23wis2QTs/lPfbm8ecvGta972Z
-b9INxTzQ7Fn3EIpCcyaWhOvVrvt9ArrJbRhQxoKIkQy/tSi0vMzwk9px3VtiDcww
-i5ckr/A74GgC+rVf4N7sSGf3YPAaGkFlJWGIvRAtZ+JZ1iKJFHULIL5GAYhvLX6r
-+YHQIH1InNbv+MNMvcFWfQkHo07TRvSTMmB0zRH75LxMooLe186VifIYRM/oNtjH
-GK2WPv8eBBYOQRV5nNXL0oNU5ZRfVAbHbzTJyqa3Je6ZDyIusDDzxuRwA6oC2vG3
-kiRvfgN4jr2xk6mNGLODiQIiBBABAgAMBQI/cU0eBQMAgTMAAAoJEIoML/M6I414
-R/UQAKZS08kPvzW+ktIzm8Os2wMmmYqj27Bn6n14/Y0B39+7dSt4+KM5+4S8l6T7
-up5nMcDCCAcCOkVCUz4pX/plQ5KuCYA4qkruzDz8d3fdJgelvrWFKPithvaDvZlL
-2J67sLETfIBYCe7VXPIm73S0xPCDe98OfzORh1oNenawvtUFvTKZYsRfpQmOmRG9
-rGOMtWzl4ejFIH3pVQpeiory9CW2U3DsWor4ZI+3iybj1Scuu3FxinHlBGxhGKW1
-UUnh6KYiLOxevkmrDinve/3/PKmyeUE2n7eQ6QOznshBCglHW+emojSW3Kr1wKt+
-/YpSmxeIdmED16Pe58DCBNs3s+BF8kJtPCZqCZGfyNbzwGc6etc61xfY0KOvNBMA
-tAPrSC6uOTsYpY2QvSnbcKYNqKBdh+8umGvM0II0R2ALguy0cAkaxrivrquBqint
-yKmWjkh4lHL6fIDDiWiJPiQamSTfE+13wtpu5tvoyS71j1qdtNzObIsHRgxsoRk0
-Xn9o11NhKx7THQDfFz3Y6Rx9kYOvQqwgFgutrvUsHM1DEePSGsQH4NYqLie1KyPN
-GIARM8o8aA4Ej7MHg9WGoTlqAw582LoM5yjYAIpGIR8KDTmMUDMDJSwhI2EnbgX7
-ZdPvim0ySC0mC8qflNqH8SqXa89XSeJcgp6nRVOmqfQiljyDiQGcBBABAwAGBQI/
-S39DAAoJECvQVSqbAePALqsL/ipA7zU0Rbymv1lPD2UOgee5fxorJUAPYOlr9O5n
-KWVrVtNmGEDdIF33KBmOE6j4qdLsTJrUXjGSQ2WKROUhgBJCzZqWNSJ7Z9nmPXA5
-xas3Agmgr2KcDQYQ/UwZKInrwory6Y5c0LJsds9n6JxIjoR7aTaYjoXDzyiyviDb
-JoqNAAXpFqIvJ5oQ3jzQe5t2nJuoEUzqmRuKAJ6yDc8MGn63GAZnrN/XVPTqdVGT
-2DpMLoHax6P0b9yfTE8NA0oEko2L9bYyRHo8bJo/eanvCQM4UJlP8CFLltyAnDpo
-ksqu1eEBZFiIQArVHHfCkrwtAHECrmEf5QF0mDefb7yTUwFZ5y5xgqn2dNhGEQvn
-5g+qYboKEekjO4WGV/+AnW4qH/jv/K611x4HRhS+r1s4pKAVXQbnhFLY0xGV+FeK
-A7+5PoTsjKrYIcH7UX4G8LGMjVLYZrnRkzaGzvv2e0IZ/QZ1/BvxiQuoZTUFQNjR
-HSknmh/3yaLrgQCc23FsWdjgcohGBBARAgAGBQI54rBcAAoJEH2d7s4ry8YhmjsA
-oMUW9RxfXBSos0A6LwGd+5pXv/MRAKCYFLG2T4GSV+qfiRsXnrgDHQHD04hGBBAR
-AgAGBQI54rOZAAoJEPKlddweGoeC/+sAoL5f7JF21mReZ8VV4nhh7prm+idSAKCM
-XDWW/tBOeJDYpiEhgyGSGgJJWohGBBARAgAGBQI55v8hAAoJEEQ0VrKnu+CcckcA
-n0bINySI33ekx/9fubh24CpLOLKiAJ0YK1rjnurjCW5sCvn08YPy21vtbYhGBBAR
-AgAGBQI59jioAAoJENSvrXiRcWfcXcMAoICAQOde9cxQl8U64Eslg24KTK+pAJ9t
-CfaUCdcQhxiR/bZUpjRaUk9H14hGBBARAgAGBQI6CNeWAAoJEMKNxEgYd9TGiIAA
-oMirDh1RADAVWM1UzQX5pn0gdNTFAJ9Wn275P+nGM/NzE/L9zwZi76WGg4hGBBAR
-AgAGBQI6DAnMAAoJEAMS6MNdjNp7hoEAnjPjL8v4O9itus0TUdRlQn195AnXAJ4z
-m6OvtlZBhNCAg9pzGywG5U46bohGBBARAgAGBQI6GKxMAAoJELslbetxrfyH/r0A
-nR/b0nUl7lRLun7ZOo8fkU2c/nzuAJ9YQ4H8n9oK666Gxd3mdcmHYafFW4hGBBAR
-AgAGBQI6JJUzAAoJEAhV/hReJ6UOUp0AnimdgCRcmEw6NdKXwiM/MruyoLGyAJ9h
-i+Wm9SY/b+Crf+E7Ct2izr5l44hGBBARAgAGBQI6JeeCAAoJEBz/8ajp1waIZKIA
-nj9bfT6gMVhFE57aiO1YacrYNFq9AKCRr72sk5jlvXWeh0ozetAri762FIhGBBAR
-AgAGBQI6K0cEAAoJECdM9+maYseeupkAnifLbVudq8u60Po3B5rH4jkj7VeTAJ9B
-ALdvcFH3QsMPNi3zstiF88rcTYhGBBARAgAGBQI6N1IcAAoJEG6ULP1pTeNwmyQA
-oPvtkAnDnH+aIp3fv0uGfEpT3FyQAJwL0u/eqJO81suhPaDADkahVwrqh4hGBBAR
-AgAGBQI6R+rYAAoJEBtgNPR2t58gjQEAn0xo189rO8p7H6qs9J8JJsqJBPVwAJ4w
-NhL6xjOciTjSdSVPfX6Dcu3C3IhGBBARAgAGBQI6VYiNAAoJEFaYkXshRaolSKQA
-nAynoaYFVvVfD+k1lMkPq4nT1T/PAJsGdVrp7QYXUd5MaS1hjfH/wEji84hGBBAR
-AgAGBQI6eFk9AAoJEG/2vJcCFjKGD4MAoK2neQwpeaThU6LM1jBtIlBTKQ8gAJ0e
-cbyEpPZgXwQRx4EUulWvK6xHiohGBBARAgAGBQI6hc+BAAoJEJ853fBeKcbCalcA
-n3ogb9+jPhGUGJjKB2XegKgePH9TAKC2Lli6xVbQe30SRKws26QC8PnmvYhGBBAR
-AgAGBQI6jS5fAAoJEGIEVKJ5Fu5nBPYAnj/8r3k9TzuK/JuIHJu06/61K6t/AJ99
-3rxumjiqbbj/dju/YipMUrngCIhGBBARAgAGBQI6nDT7AAoJEBUjaPzZBkwIgj8A
-nRp1lSjaloBXq/5swZmL7hy+TiXlAJ4qL5JGcsoUmij1ngg9vMFkSFwYeIhGBBAR
-AgAGBQI6pEiuAAoJECQDiafuTpyZ2rAAn2If/DH3iNvuqwZ7J+3YwCKkFVZgAJ0V
-/7WpYpFcawWxv/rAZmMhU81R5ohGBBARAgAGBQI6pYc7AAoJEPGffNTDGmB2GDwA
-nicqxgtqmDme6twd9DY5cUsvN37FAJ40nGio+42zjDMDtp4gOGU2nTHxfohGBBAR
-AgAGBQI6qnIvAAoJEC27dr+t1MkzYvoAn3KF0mCWkDVM8Fw1uE/W/1wl8LKvAJ9r
-CldbFWkfdCcyjclGycUWL/GW1YhGBBARAgAGBQI6wmlRAAoJEEFe8BGJGukbpeoA
-n0EfQclzQPYItq09Uaav5ROv4UFtAJ91g5G5Dt7eLcPHyFzGz4LWGE69YohGBBAR
-AgAGBQI6xnWvAAoJEOo/YuNyt+xvGWAAoLKijh3Dh/5R2VcPp2Ocpujxn/5HAKDO
-zlEqaM2MPYOEZ+UIbhaLrYEUY4hGBBARAgAGBQI63xpfAAoJEAWyDkpQEKwBmL0A
-n1x2j1NwzQm1jn8FeYIArOTHq9DyAKCD85+dlI1lsu9wH8cNJMImOu/IMYhGBBAR
-AgAGBQI66kliAAoJECQOWLWWm1jHJAIAnR/G/NurrSoFKH4dEASjHwmYQxonAJ9B
-wbrWBFXT3MYqJ6uRYRhSQwp9n4hFBBARAgAGBQI66+HhAAoJEI/At40JszEGgoUA
-lRm27Uiaq+FkkIn87Ua4aqimGhgAniIOeb6fSWBqSYQFUgrOrCsNBqsUiEYEEBEC
-AAYFAjr9C3UACgkQkWipGa983KTD5ACfdMcQGf+Yp8oNb0yCEzSprGJ0rVYAn205
-Q1FvYUeKm6gBTVtuQHCk9YhyiEYEEBECAAYFAjr9C5sACgkQx+D2lKJNi063twCf
-Vnv5L8U0r2VsB/yhf6tfwLmQLWgAni2Hg0nA83baW4Z7iyHLcHMxIJMZiEYEEBEC
-AAYFAjssZKcACgkQ/v9WCvitjrniDgCgpuWIBpbKrX6tyjuAVq86e/Iw0tUAoKMT
-I4Q2SPiep095uQ2DJjft3m/1iEYEEBECAAYFAjsvIUIACgkQt+9CfNoR4PEYgQCf
-Z/oMdVITUrQFOJV1Cf0wFWLIxd0An059Qz+rjcZXTSU5Flo6T+Fn/3yUiEYEEBEC
-AAYFAjsyNQEACgkQMSTUCNBS1+JdcACcDqAOcKgYsXKixr3jk3XDqN7cH2kAni+q
-QOywl00VZGy3/1xY5gXNFa56iEYEEBECAAYFAjtBNqsACgkQlPjcB9PRp5R16wCd
-Fb90m5OvimZ1eh8EPfLn/oKFK18AoKr3DXHCBSSZHj39CfmtBKokVn82iEYEEBEC
-AAYFAjtP56EACgkQXkUCMnOIgUCbawCdHSx8ePLBbajbUe0jxz7Kt6e7YCMAoJtc
-2WTy4c1deW0tGhCkmp1ftALeiEYEEBECAAYFAjtZGeIACgkQcAIrUZErEO8CnwCf
-Q4l0nnuFyoF3kPl23zOlNb8+bwoAoPJ9/hAlCrIPmqZeIopoocZ712/qiEYEEBEC
-AAYFAjted4YACgkQkSDiUIMAZoqkSwCfWENqkbLpiw7lgZ8pPrTKliXW+pEAn3CC
-zBHmZYXb2QleXYQcsMcPxCYIiEYEEBECAAYFAjtfRNoACgkQg2i7WWb7wYwafQCe
-N2wve9kwsGyotATbKoLPr3L1A9cAn3T98Ty4Urlgv2QLtP/eIz/J/TgUiEYEEBEC
-AAYFAjtfVeEACgkQpIg59Q01vtZ6uwCeOZDyEeen0rbM1ITRY5gVHbhrbjEAoLOM
-i9xzPc9Acc1nDm34w0vPHTSmiEYEEBECAAYFAjtfmGYACgkQIf3VFb+4gKPrtQCf
-VwKm62WsWj+Q4RSioRTGKDWiitIAoIQFNbBgm9oclsnNslYY661LdmwRiEYEEBEC
-AAYFAjtgAfQACgkQkmD5p7UxHJe1LgCfc4ul1ab0SnIzaR7DirBYK29H9GUAnizn
-J9RfeM4nS3TMjGxaZCIjbIoYiEYEEBECAAYFAjtgdcAACgkQ/QMmLlEgvt0sbwCf
-XLFd7in8Xp7n1G/Jw69HL20RcTwAn3e7CaIOnTnWqaGYXFwQKsW3K/xoiEYEEBEC
-AAYFAjtkmUMACgkQOZUYFjSWmolN0gCg2ySAu58meE8CepGXZwVX2CjfiQYAoMV7
-vPKDNOEV0bJONzdxbJHa2YDbiEYEEBECAAYFAjtkyl8ACgkQg5pjs0vy4heicQCf
-c4+tARJdQVoXzmM5A6bFDqY1dSsAoJmnWN8Rh7Kxd2rb0mrACoktWvfZiEYEEBEC
-AAYFAjtm288ACgkQAvuU4IzqIz01LgCgk1oy9oywJcPNmEsMNtqk2qPj1acAnicx
-1FOMIsro2RpJVj6l478RuaO2iEYEEBECAAYFAjtoGhwACgkQ8L+clySSyY3WoACe
-OzSxhNUy0zcuMDkvkx2BEO3dwZ4An03w5OAJ+CNRIlHMo8Ulpvr/gs4wiEYEEBEC
-AAYFAjtw8MUACgkQ7WTxbyAOsazMBQCfYVkbElRfTpW/6q2KidiRpnb/MpAAn2q9
-P0g8VKGn45RN+zlRLgxGbB8XiEYEEBECAAYFAjt4B4AACgkQIvjvEYYapvGd5QCf
-W5BoT2Zn3qtaopgQS0R3IV+y84cAnj//Ir1BZGAbX5dnhUNE+jGQNwqwiEYEEBEC
-AAYFAjt4CCcACgkQB2XEhsnp/bnTtgCg5kLJwZ/HURcwmfKXRL/kw+weLbYAoNr7
-OKFjDaCsGnYZb4K0AJAHOJK6iEYEEBECAAYFAjuTOx8ACgkQzMBjFhovlE60uwCg
-16P3nn13Bo3ah9w3f1kEXkMffN0AoJE+g5G671AEM3k3JrM5n6QhoyJ6iEYEEBEC
-AAYFAjuU1C4ACgkQ9OZqfMIN8nOQLACghjVGvQiIP7UzSgoypQBircgu6WUAoKYV
-ehDAT7GLL9USZ0932SbTMSrciEYEEBECAAYFAjuYNSgACgkQkC29kYw4qQow9ACe
-JQZEIdnXBDl8o7KObj4j3y85ClUAniBd7MTI3hYZWAzerAWLowuhWoHWiEYEEBEC
-AAYFAjudpx4ACgkQeu61KqChAjBhngCfXJtEmsKYapxlcFjKlLdSE/0fFzEAn0mK
-ByJcvv4U98EsnZ6XgWH1ouDZiEYEEBECAAYFAjuv+0UACgkQr+l9yO+T5cqNjQCg
-ti/CWMJTWoE5kXtUCPd28IulShEAoJRchXN6yO9cwtF7vKbRKPyFr8dziEYEEBEC
-AAYFAjuyl7wACgkQVtINKT0Hg4TTBACgk7kIdp0QdG03oXXru8Mr6iqfrtkAoJFP
-ucZLjshOHZpPs6Y2bo1v3uu7iEYEEBECAAYFAju8j/QACgkQLYywqksgYBp/3QCd
-HPEFQoGHddZntU8KpRCxwbTI0a8AoILBb24XzJmEcFFQ8ZcdjkGssWrriEYEEBEC
-AAYFAju+2iUACgkQptOr2ZzTDT2PsgCgrSaQTPZzsrk3OlOnBwvgCBTjPO0An0lI
-J329aJehBnN+sbWPA1SeW9F3iEYEEBECAAYFAjvPTPQACgkQUk1dcheBzp4XDQCf
-bNYb6nwGdcp9C1CC/i7Ag4VgUG0AnjhlNMn6rHmRyuKxhyLaIIvCVRdbiEYEEBEC
-AAYFAjvW3JcACgkQyWyxmULvU0M/GACgn+jUI9tY6yhbbOj3Zvhvb2qJ3lcAoJLv
-8T4b2Fri1DLUw7NFpFBHN/NgiEYEEBECAAYFAjvZYsIACgkQCHEDMp0gQw0mowCg
-juSnV1U2uyYJkzaNM/JdPBjtyDwAniU+yHBhx3Pesvv2j2ZcIqOdWAZZiEYEEBEC
-AAYFAjvZiYUACgkQtqtGgZIk3wH/GACgxPHzVTPg8PpD7+VmxmjcgQWet94An3S/
-qakpI1EaQg5IZ1PoxammSGVqiEYEEBECAAYFAjvalLAACgkQ89a1hWqaGIhHjwCd
-Hf7kULY5i5UciZDEImzPVCLvLAUAn2v3BPprMxZ2V8B3dEjntKlsx6OxiEYEEBEC
-AAYFAjve3pEACgkQh6i9dqYzhRq0+QCgiesdd7DHo1lmrJoc2Hmyvp9UHKcAn2el
-ADZiv2TL/fJfYNVLkY1dCuBFiEYEEBECAAYFAjvnlkYACgkQpO2E8SaBoMGGcwCd
-HGMnGIcnaPvxTI670LvPvYfV4JMAoOSrI5B0clSF+fFDLERghHiaD1XiiEYEEBEC
-AAYFAjvseYwACgkQL1tKMpmbH0yA2wCgh3E0zxSOq8tu9Vo2RuHwGnvUEpIAoM5n
-sHAzyaubH9cPpg6KlJ7TjFYPiEYEEBECAAYFAjv9aRcACgkQdLDg7hQ9KqOMhgCg
-4bpcgwpTuD8JV2wa8XNRibQJWhgAn0zrC9KAj8MWX4HB369BIhNf3AYAiEYEEBEC
-AAYFAjv+bKQACgkQ49d+H5UWGZEl3gCfb5pl6mL3xwQFq0EHUsMNuEq/a+sAoJuo
-kALmeWO5tzapyPMjjRFttar7iEYEEBECAAYFAjwD3kIACgkQUL1+FvMooiW+AQCd
-HX1NIkOL+6/UI7Xzutm094LCStcAnAyxY6BYFdDEp++1/C0iWd7C5kTyiEYEEBEC
-AAYFAjwV9FEACgkQ85xjo/gqveK5BACdGvwWCw+5Co0eRQ7uIQvir0IenfYAoO6i
-Ylwg5pFYvjcN2eMWpRljbQn7iEYEEBECAAYFAjwswRcACgkQ1sDu3biMcKUBaACf
-fsA6qXCB+ZhnNPqNwyYPPQMRQD0An0phCZLMSHibIXBfvi5XKQFdhojTiEYEEBEC
-AAYFAjw1o8kACgkQfQkeAEIcUTRHJwCfX1vsANX6TpAXR+yIkHUZ1/NhS8AAoMcw
-l05v6UM00wGg6L1YY+u91N93iEYEEBECAAYFAjxAvUsACgkQozRwEVjsIl5E+wCf
-V0dc65O3kg2ZbpqnSQjYq8D72v8AnjcNUWBEa/4fqq75pgVkXHG1/o5IiEYEEBEC
-AAYFAjxAvhsACgkQozRwEVjsIl4cwgCeMhI14FB4B32nBwe3YT7ZuyJIO0MAnREf
-YPBubiAcHD5GdU39Xg9EKa+RiEYEEBECAAYFAjxHw8wACgkQG5LyFGFNXOuuEwCe
-MoERlAxehkeHAqaa7l28tKz96mMAoIIxZAMRVaJkTcQeTsj0J9Pw4F09iEYEEBEC
-AAYFAjxdkoYACgkQFiDKULDTkUtQ9QCeOWvFK1oGs+/D2VuqlLS+exaK1RcAoIAX
-VOYNXMFafByJG8mcyJYWysh5iEYEEBECAAYFAjxd3SoACgkQqEj/8Co6lHib0ACc
-CuKYrQl6hasEk8+Au/EzgaIzmWQAn3svKefyh0R2rm/bYi6NMrR3pKd2iEYEEBEC
-AAYFAjxh2FMACgkQc2shPf8sJp+uxgCghDHpZ8mscniDkV7po1cI63r+yPsAniPH
-IT9/KZ0F8t47hNJ4ntZU7/jsiEYEEBECAAYFAjxmKzUACgkQtC/nHH/DrZZkdQCc
-CVjhxDj6xJ0Lv3PpT/FykN0nCbMAoKGDVNf9kyKNgx219/Qni7hJEl8QiEYEEBEC
-AAYFAjxwuDoACgkQD5EwlQadeUPC0gCgjyd8/q5acdYXtd4tkIU1AUWDoqEAn2uk
-/eLugWljBLALDWlbBPxhTLFyiEYEEBECAAYFAjyCYvkACgkQsqbE3BH1ogB85ACe
-OrYUHl+naIupoaMc+zDA7r1JSYQAniCgY0fwn6oYGlu1C1V7Qb5hYjAciEYEEBEC
-AAYFAjyFTDIACgkQtV7owmc0engENACfT0n6xeFvRbhjKtODwARL+yX2/cAAn2hO
-ViKUAOW2iJnK2khvrTjpCi3biEYEEBECAAYFAjzJh+8ACgkQqpTUbs1p1cZApQCe
-NHHUBpgwhwlLGGB+J/HXpZ7CdrEAn0t/Rh6No7fvVu85gsvgqgD4NSZEiEYEEBEC
-AAYFAjzmcp8ACgkQKwZcgs55jUP1IQCgw9+xaWrHsGJ6vHlxZVYjF1NQKP0AoNbb
-4egx0qgtFbI7CgtRmIlQ/JXIiEYEEBECAAYFAjzyiVEACgkQeElPvVIamnRekwCf
-XFXO0yrvf54LveXjwhVrOoB12sQAn3E+Xol4X4ndBqmByaVJICAuHNA8iEYEEBEC
-AAYFAjz4tw4ACgkQIbyDR5y81feMrgCfTVqDcJ0XJwDprEcMVl2sQyJuLsAAn0ZI
-U+y8P23tCB1ge8fKf9/XIuPkiEYEEBECAAYFAj08QOgACgkQwqyhDwc+JHEL4ACf
-dBRQNrKGuzqU+JdEAz9qf1Bh8A0AoOOq18psEdqW5EdQvFYw0Jv9vle6iEYEEBEC
-AAYFAj0/3tUACgkQT0WsQz/0l0wS7QCfdW2MsCgR2ERyhL2A2x8REdvhn5cAnRj+
-Vqx6hKd900Eepb7sDxyS6psJiEYEEBECAAYFAj1NG6sACgkQQPN8cJTNiPoBmgCg
-sBef4t12dS9b3/mdXgFFJBYGH/AAnjQRHTNznlywGwXtepyKObQED8AYiEYEEBEC
-AAYFAj1ao08ACgkQ6cgU7J/95JxAcQCfby8yGGAJhY4pvSoM4939bpGCRtYAmgIR
-KO6khpB0MmihrEI6dziFH1eaiEYEEBECAAYFAj1pWxQACgkQuQMWIGtm8TT5CACe
-M/uLzjGEjn/n9W8Kh/qWJ4yb1nkAniSg11Aqw09L6zPVr+y4VrgghuOOiEYEEBEC
-AAYFAj1q0SsACgkQbfU6uV4fG87njACfZ0DhJ1UGX/1f5btQOuS+H6Q1VtgAoPBU
-7aPVnxldPZ/h5o4XWpz2yXT7iEYEEBECAAYFAj1sncAACgkQ9oX72eAN1lSp+QCg
-ohiysP20WcFV9bdIfqyVTu3v2iEAnjKdkyN7jwI08a2eOsUMimprAriJiEYEEBEC
-AAYFAj13PMIACgkQpVM9FaYhUOCUIACfer5wu4GvyiroptGjN8HLxcjmiNsAn3TE
-k5bTWPLWGfX/Kp5/pBcBPDdeiEYEEBECAAYFAj2KaZIACgkQ82uuK8DpNCAFhACc
-Cyz1T3Ht9l/qBdT9cJL8SR1rh5YAn1M3F5VGOmOdgnSjIfEwk6LkzNGliEYEEBEC
-AAYFAj2OlIkACgkQAFDl+qFk/Q1bAQCgh4xvjC5FVCeWSj5laYww5wPlq+gAnAvV
-qa883DlLZ0Hi3U7135MQJ3F3iEYEEBECAAYFAj2TNYUACgkQEY3ZoL5uJ3jWNgCf
-ekC5x/q5o4AXDb6hoLRWxhJ2HfoAn1R4WMOd+NevTiJhz+J0edsREKsniEYEEBEC
-AAYFAj2bBz8ACgkQg3DPM9bSYMqodQCfW0Q1YXoUqPtI+PbavPb4fex0IzcAnjmP
-v52QgFiw2lN7S9gxN7EoglxWiEYEEBECAAYFAj2kWc8ACgkQdjdlQoHP513PBwCf
-XkavLzc9/Z44QNNXMBdZKauA8R4An1t4+0jZk2tdlbdnoWmm7t8XaQhJiEYEEBEC
-AAYFAj3RaWoACgkQKGrpCq1I6FQ5IgCgzRDq74P0pOz0rEVzI/A90tV+iTgAnjpM
-n/OkfO5UUfo7aEZa/rhU2RKHiEYEEBECAAYFAj3kU5EACgkQbgiwzIBQRqPsBQCg
-yNkezkyKRM02yXjqb9IMhd2vrt0AnA0P0NuRju5EnsTok+r+pRs9m1ULiEYEEBEC
-AAYFAj3lRQQACgkQKC22VTgH49RseQCfZmNMa/B5JtezgeQOT16FZZaAc1kAnA1V
-n1Zqy7DEc0sXGRlFEV2ztBzHiEYEEBECAAYFAj3tX2AACgkQuxW2ExZWiX0tdACd
-F7Yyzi29Z9mgo5LD+9IFYQyI164AoKAVzz108TKaRb3TzmtEXVqxYFWSiEYEEBEC
-AAYFAj3uEocACgkQugMkHg/A9eFgBACfbFp1ZN73iUt6z3CGJAbLvjxec50An1kS
-BcZFpjAJfcVyGzVPgdqdeM8miEYEEBECAAYFAj4IEdQACgkQRdel9hlgaga2kgCf
-fzTE3rsEinxwU9sbdpooeBdwHdYAnRH5UeIpRmGVto/0US1GNveCgWrbiEYEEBEC
-AAYFAj6P33QACgkQPuR8c4jhFKIqHQCfa824PR4J6nUzzoPjRwzld4Uqm38AnR3g
-ZRgQIQ8ZsuNvl5CU0QZRD8SaiEYEEBECAAYFAj6flagACgkQleiQhuQqcGa/lACg
-v74MJlE15Q8gdOo7x8bbQzjNpbAAn2vm5HlZ1ZYI7g6npEXT6aUN1kONiEYEEBEC
-AAYFAj6+HXYACgkQhLklxa0MWdeZXQCgyW0rbFKbgTQwnsmhTJisErmL3QgAoLq4
-cm424l/PyjmS4QHr+iDRU4HliEYEEBECAAYFAj74p0MACgkQ+y7y9FbF3ZBtzQCg
-yJyuKOhsX6pbM4xlPGP0sFxhXaMAnidrXD2ILoRGZ+S7KVcV0ooX3EDMiEYEEBEC
-AAYFAj8WbKcACgkQbwTDRMQPodxIjwCgiS+3tUZ9qblX5mNBD0HDk51T6CwAn1X+
-8PLIeq47bMR/eEYCwOqX3x//iEYEEBECAAYFAj8ahEYACgkQk+Wzm/ozm+lNLQCf
-WBpY/6Dji00kZl55COSTcBzkW4gAnRvhtc5dDtEPreLjaCcc9OgXIsJviEYEEBEC
-AAYFAj8lL80ACgkQcAaCB+JDIFeQMQCgthZ4PMW56TIxHuQ1hB+6OHTNCoMAn3QU
-kf6MjBFWaSCa8ZLuZKs3E4VgiEYEEBECAAYFAj9HQVMACgkQ+y7y9FbF3ZAlvgCf
-a1rzmXPjVpBLaW8gJmUZ+KE+1PYAoMDHWOsNGlzPi0ecZ5jFbwKp0AYGiEYEEBEC
-AAYFAj9Lf2AACgkQo+qxryDitlB4vACgxVKNHLTjr/W4ha3dTNycCKiW7eYAoLqk
-/A5deva1k461yBUwWag17pe3iEYEEBECAAYFAj9WPwEACgkQoYpWGTfE7kDq8wCg
-uKCDLTHj0QQmSJ5Ruovy6iC1OXcAoJsKqjbiVP8NE6cVToPDAeTRLU8FiEYEEBEC
-AAYFAj9n51YACgkQnmS9sI6ifC0qNwCfcGtwnrHDHYF8cXCD6DSomCmOb6oAn3lY
-qMvXziu6CakHAtn/5xJt/QNwiEYEEBECAAYFAj+QijMACgkQMC+fdwHoOyV6zgCg
-xprelBeZyK38dQQo0D1i31GR0X0AoLdIt790FQBRL7Tz9wR9Hrk7kmZ9iEYEEBEC
-AAYFAj/P5qQACgkQXfUwmzIVCLFbwACfVsTYrkA0nD8vhu/6F4BkreYz3gQAnR7C
-aK3kpC7jZ2iOL+nJJT+ml69diEYEEBECAAYFAj/kAecACgkQk+9jXsWyW9MtLACf
-VIhj+ColUSAo+0SQL+dXy7/QPyYAmwT/Ch8921Ye5yKAqj7/22cNOcaniEYEEBEC
-AAYFAkAGUHEACgkQEwhmFo32INeKWwCfS8qn0L6h8DhbxhhYOTIG/YGwnagAoIZS
-ZlUmnmPYXnrrEY4wLyu4JZCXiEYEEBECAAYFAkAIGikACgkQUriM5KOnlzYF5ACg
-gJfQq09wCFvo5g1wVrWOdfyYuLEAoIIn/028tMIr3rQIIWTCMLDI7SCLiEYEEBEC
-AAYFAkA2hL4ACgkQ5s/mFOYu21UeqACfQ/6TWE6WCCr2QqgODq3hQTROjVwAnRGv
-THvJ/Yyd/r9sx9EP6TOZFP6RiEYEEBECAAYFAkA4xJQACgkQlcGs6wo7juVYIQCf
-Y8ksmBdP7W64dtCwbtou4Pb4uXEAnAt3xICVbkjb2rFIBvZ/Cz90eBCBiEwEEBEC
-AAwFAjveqcMFAwBTDoAACgkQ4zynCI5ifqT7SwCgzv0vNC6h4ws/TW2cxCZzyQGk
-3rEAnj2OXIhBFdYWDLUzoUuRMNrplnb6iEYEERECAAYFAj1PiAQACgkQ06HFHc1N
-8gWcXwCcDQPaHDqpU5JKynKLtFMz5uJiniYAn06g4xAgrIU6vKR0hNXOvSShTxpk
-iEYEERECAAYFAkA5mL8ACgkQbJOOcQCjKmH9VwCfd9/Fx8VZyz37nNuYrSSeak12
-O64AoNCBa+8kFmOmT1s7Wl1D9M4Wg3MeiQEcBBIBAQAGBQI+SLurAAoJEMQAnns5
-HcHpIFIH/3aTAcVgZvCwDkxqqDlKm3P9wIIjMCaVnpBnWZ0RIVUq9d16tWTiO8YQ
-dNs5BdfOTwcPN6Qf8OT8s1zWO+TiLuoWvMRgJDQMY9j6UD7M5CsnwwWQRB1xm0RB
-c84b+fmHY5tBjJyQNeRD9WKuTWYSHLqmCzMqTHseejR+Anvl4zw+U7fccXMfg2UE
-9V1UIRYgehODk8NMB91xNJaVHVNhm0jV2Adt44O3Gjpd1hpiEQuL0vEHqD2Tf6al
-Yz4Jo8KPEUmPuEPv9A/nxJqxvC26Gta6TwlGKo0uicjbqdFeUtWZBFRqIKUO0MkU
-fczAQrWoYkBmFph2JhRXzoGfJWZZd82JARwEEgECAAYFAj/NruAACgkQMO+ERHE1
-42Y4VQgAkmboHTcXUulB/GWM+M5dji63jT1yczL7vrPrEKmzTVz949EkzhfKcQW7
-64xcL8mxbWnmCdzP+udYmDM7LfvkItBguO62DD7S3FSLszW+uJ4Ezc7aPkH4wQhK
-5kx+S+2b7YR4LJHYQi66v8T45GKZttBCPPj0aLpQRDx8lfWVO1TD4s7AGI0FA7DX
-uTmmYhx8sX5jMHB4gkBMeqzCXhGv7q71RDeeVpb8ZL0DceMv1s6FUXIRHZ+uEgIL
-MXoIbGxmIDMWzYyhVnD2pJrT8zhYMBuDmTyQIvaxzJwl9vcLwiO4huXSxTsFESqN
-yksK/JCzIVyUwBNH6aNgMNAe9oudrYhGBBIRAgAGBQI9Sb0MAAoJENX0/2PBGjh6
-qCUAn3VNcel2GP1grva6MZPKRsU6RpxrAJ9jFzuwyw8Doxv/rZ6Vq9V2WshXBohG
-BBIRAgAGBQI9c/XcAAoJEEmSwrX1c3K8qdIAnilCrQ7tQDRXVqhvHPQ02h6GycNJ
-AJ4xfSQh1+EFSy441C3pTn4AR3QNv4hGBBIRAgAGBQI975GZAAoJENy+GP+gNVO8
-2OgAoMGoOfIkIXl8mmtuVJhYmGbI+oXDAJ0URPO1ywr5YMkv6zlu/Sj60IS+VohG
-BBIRAgAGBQI+R9koAAoJEO3RUpKXBcNb60AAn3dO2lX9QoQ7Q6URnLusCGbZvEHa
-AJ9EnAQjxaThspM//tRwG+jUTTFxkIhGBBIRAgAGBQI+jq56AAoJEMXAxcchjRjX
-jkAAoMUvnYF8kXtcbcJscVKUqCGTgI71AKDtu2jmwFxHFvnbmzqkxDpCdtk1W4hG
-BBIRAgAGBQI+mKOkAAoJEI5cpMLbUON8nGUAnjhiNww9RCCujz6mi+hrR8YIfGKN
-AJ0eHCtAWvHilG0ICWNuKV1NAnNP+ohGBBIRAgAGBQI+7v1CAAoJEIBOmxXE8Pnw
-Y34AoJmsn0/o468Vf3Rq+SK5wKY20MJwAJ0cb+vrGnfFiWHMoxbKQvwKLe7jNIhG
-BBIRAgAGBQI/iY9FAAoJEOTlTRGaDNqZJVQAn1cu7t+cSWyHB/RTmQ8Bn5Gxef1P
-AKCwIjKzG+QAtroqRGG+9fc+TJjbEYhGBBIRAgAGBQI/se/1AAoJEL0i+r1xJN0k
-NnEAni7HP9tFNqzCrElT+cFZcgJwK4UXAJ9rs/N8qA+nGu3qS8nRdaFlye76tohG
-BBIRAgAGBQJAAuKQAAoJEFU3ABLBsEnH2tcAoMNDajZzn8Jr5m/NDxnPmCr4SFnq
-AJ9HT7hREpqF/XsXiHjw+hGKbuhqx4hGBBIRAgAGBQJABBKQAAoJEMZfhluOkZid
-Y9kAniEcIGhvP3o7iLLq9RltOjJCybwqAJ0VkRHrBYOEDYT3khxS1r2KMV45yYhG
-BBIRAgAGBQJALq5HAAoJEPxo4Q6sdjs1Pv8An0uWMGg2tsWlV9XASJjlWIITXozo
-AJ99brwgZeHhruXPE/Ex32TfzzfNj4hGBBMRAgAGBQI9L8t1AAoJEIvYLm8wuUtc
-KfcAnjRp07TXOzsr4klmVldF5AqxS52dAJkBu51LQ4vhudxpXlh8k3D3n2fG5IhG
-BBMRAgAGBQI9v+PJAAoJEC8hGeLPvmTAfP0AoKVCo5/+NO4tiGQiehYwlnnWD9f0
-AJ9hUhK+12EzvdZSTK9mQXbA3rxVYYhGBBMRAgAGBQI+CuilAAoJEC2r55+p3AAl
-VlsAn0hY/FwjCg6XWqaLapefMSm+cuCjAKDAVyWXX6QkqA9UlrVV80vxU4h0V4hG
-BBMRAgAGBQI+Twp8AAoJEG31OrleHxvOdW8AoJTmHT0pQE+ynOkjhwYlgN3kDscJ
-AKDotNUQGSfzO7tb51FogUL52y63Y4hGBBMRAgAGBQI+VLQYAAoJENkpKCV+qUXf
-JKMAn3pZHYlymfuy5unJjkT9PqmFw+LoAJwNCcGwACj0WyFvCQmek/lNUh++AIhG
-BBMRAgAGBQI+8JBVAAoJEDSzBkBHuNNZRW4AnRTER+6orBGqcxjR0sprQ73nscFA
-AKDFBjzKFNyI27mXR2FdEpn4+f+WsIhGBBMRAgAGBQI/sC4kAAoJEGDNOu240Kti
-OzAAnRIJtk3X36JDQ1OeXn9RRJAuGE03AJ9WKOp+z/TQZbkGxr23XaogY8yKkYhV
-BBMRAgAVBQI54qw5AwsKAwMVAwIDFgIBAheAAAoJEMhroGpRfQ8ODDQAniYkE8gp
-kMzxqLtnJf0cjl4+/ZGGAJwLE+sbe9oafzxYssGDPbwbA1b/Z4hdBBMRAgAVBQI5
-4qw5AwsKAwMVAwIDFgIBAheAABIJEMhroGpRfQ8OB2VHUEcAAQEMNACeJiQTyCmQ
-zPGou2cl/RyOXj79kYYAnAsT6xt72hp/PFiywYM9vBsDVv9niEYEMBECAAYFAj4c
-ihcACgkQuxW2ExZWiX3ntACg5XYuB9EX4qysEsAAFMMMTxLenKoAni8jbGdhoGXH
-3ZTWJDje0nvE4wX1iEkEMBECAAkFAjprQR0CHSAACgkQyY+3gBPeB1mHbQCeOU4B
-ZrdZ9ahvkMm9J4igPb5nYaAAn3U9ib5GD/acPVG5r0CEJKGeKHx3iFsEMBECABsF
-Aj5PBuwUHQByZXBsYWNpbmcgMHgxMCBzaWcACgkQbfU6uV4fG860twCg2Q5uGDwD
-8Gch8RP6+eLdyvB0uhIAoLPbsGc60Znw7WhKhzzKi0y/wIvCiIwEMBECAEwFAjyR
-8tBFHSBTb3JyeSwgSSBzaG91bGQgbmV2ZXIgaGF2ZSBzaWduZWQgdGhpcyBrZXks
-IEkgZG9udCBrbm93IHRoZW0gYXQgYWxsAAoJEMlssZlC71NDWRYAnR/iMZ2VQex9
-2cIa7bjonfLtjGfCAJ97qpnjK2BWlmx78rVcmch4vLwYo4hGBBMRAgAGBQJAikAS
-AAoJENnlgkXLzYjFuD4Ani2iDbJ0ywckhKQ5cLz0VJh4k9UoAJ49LJ0ukY4M6dpE
-ybEaRO4mTgbWGohGBBMRAgAGBQJAdieMAAoJENrArTcPEkCiOZAAniY9O8rGOUng
-Gz6urxBaA9Be6x+CAKCanpBpmZ+KfQvt9P9yx/d1WpGx1ohJBBIRAgAJBQI/1S9c
-AgcAAAoJEBX+akWWCq7yAlcAnRlMy6IPn/eYrcF86sdtgnhrtZwCAJ9lEUtbgfNh
-F2aLKWFSpVFz5J+02IhGBBIRAgAGBQI/zon2AAoJEMps9rQLyK359FgAmwQUR8aa
-U/Ay93OKpVLe9n971EvhAJ9kzDVzIiARqiD6W1a9hHMH1FGzC4hGBBARAgAGBQJA
-chjAAAoJELcDJlbQA7v76E4AoMPYhEW+VHw23yteFnie7O2h86RdAKDxK1shqL+K
-zD3XAZQ8HqfHpbHfv4hGBBARAgAGBQJAbtiaAAoJEAsA24E9Ijhb1xIAoP7nXMRd
-XxkoGZZF+wdKgiA/mvG6AJ4htu9FBeu7gqNZQkekmex+vRxueIhGBBARAgAGBQJA
-bUrGAAoJEJgPCexN5Y9gkl0AoJzX3Kzk3Uq3+tYuGLD94GfdBeC/AJ9xAfFLmzgV
-kml6dDHiFIuBc5ryr4hGBBARAgAGBQJAYu8WAAoJELOn5StmJDKa9GUAn2oXTzZ/
-zR9/GGOeEFvIw4qpwmMDAJ9XSR5tq5ka60sOvKTwjLpG9Ps+EYhGBBARAgAGBQJA
-VAUnAAoJEFJTyEmOou8meJIAnj4/pPH+Fm5bxEjf92zUe7NsujdwAJ9r7zdcWEkk
-1u3nL46ptVYOk8DuP4hGBBARAgAGBQJAMzlgAAoJEOnWfireQXIWn6MAoLbBNi8a
-0mLEximjL3jLyY1JvMJ7AJ0UxcVJJiVtaYJZT8OBUaKVix5hBohGBBARAgAGBQI7
-l3F9AAoJEMj6RZFuNvzLgXcAn27vbmJCUmt6+syLthzd0bF3OEf7AKCvg7BV3EBd
-95Q7ysfc+N9HpF67AIhGBBIRAgAGBQI/xdCGAAoJEJpv8FQpnhtsT5cAnjVrookk
-QXkbGC2WJ4rxKTpkBNelAJ9m6RGagvqpv5bA8p+9ycQ5Cy2O2ohGBBIRAgAGBQJA
-AU0DAAoJELk4CjHmlh2NipcAoIGgzK6bLXXln6kAIMwEwzB8D+KKAJ49lsgzEd7K
-ju5TG6grl6eV/6h1mohGBBARAgAGBQJAoBWHAAoJEANxTXifOqmaqDsAoIOlRezJ
-fUEuz+2gniQgumhHPotYAKCOPXnTy22eXsQVEM91Rka4ctt0XYhGBBARAgAGBQJA
-mPcYAAoJEHGcAqvJmvMf3ncAn1bPQRhysYFCvu3Uj4/Q48pt2IHVAKDIo9GPDrV9
-VMN/WBfX84t8h3pdwYicBBABAgAGBQJAqxz8AAoJEMPCvFg7oIwNJC8D/i+rxZN6
-xy3jFE7g0Ef9dHXk9L0kbHzDMs34wRL+3B5bdgWKR3PmTUiV217mrd2GjjJAomzk
-kN2Pve2PkvBQacg1166WCnb13aqP+kId+1Zvyg8JH5oDLAD1askROVQZ6fvd8Hv8
-lrGmGj25PvzJbkSB8WKKAKOfItjWi2yI+jb4iFUEMBECABUFAj+QUiMOHQBkaWRu
-b3R2ZXJpZnkACgkQSB6HhDKZ9oqpggCcC2yCPmd+CSLQj64vt7HGJSYJ12EAoIfP
-7Jha0WsRBAJaF4BefvulXSVsiEYEEhECAAYFAj+QT9IACgkQSB6HhDKZ9opynQCf
-bDXSOy3ubf2EGa6b6BQRKjazoY4AoKBdMad6AwY2rSph22GMLhIkkF+5iEYEExEC
-AAYFAkEJpSoACgkQenyZHmk/YxPZfQCghA3KsyAYN+1Dn18Vc3tsyj1XguMAniMn
-uly/kXQWgVKW5zyIyRtKtlxQiEYEEhECAAYFAkESGMMACgkQOcor9D1qil/LNwCf
-fOGXb9F3wy36zlgzolAUP0K2HPYAoMYRiXFqr2+IvaukQHM2b4zXfENgiEYEExEC
-AAYFAkEWIDQACgkQ+xlYQPsqm10ajgCgpaJGfN21kp2Gb9vBLH7ih66ETtQAn1Gs
-iyhbHK35dDKs1PgyxWh3tBs/iEYEEhECAAYFAkEZ+4sACgkQTDCXIWohcimbnACg
-nXjUjvgFhzRgDBp5vlRmqYxFcYoAoMXLpFHQ/89b7dolA6aAcYcldwJjiEYEEhEC
-AAYFAkEgXcoACgkQPGPKP6Cz6ItXgACgmh0cmH1g/FmWHKLpD+xCehYLwPwAoJ6G
-D0WuLZipH4cF8XaR0Xm3l3tYiEYEExECAAYFAkEjBy0ACgkQvIbA0KnrjgigFgCe
-L1XUtAiolJsc7QJgtf69jgt/lwoAni1+tB6pTRwdqNHs3/u6uK76sNEXiEYEEBEC
-AAYFAjubn40ACgkQmuxi1ygNtXsrwwCfaBtiKwpsNBnMU2eZKgQRDNmNVEUAnjVF
-XpWxgVjVzZOFNuLA1MSRb+z/iEYEEBECAAYFAkDdxNsACgkQK9KNMpH/QhEOaACe
-IJTY2Juex1XmU/LDlV0JOfQ3GcAAoMCK1HemijkfIgnda0mqM4AWOkyziEYEEBEC
-AAYFAkDj61UACgkQJbhu7aTV+SU72ACeMVy6yLWJrExtz+Pk3/iZhgnL+q8AoLSz
-sK12FtWQQnay3+dQA70Gq6gyiEYEEBECAAYFAkDk1+gACgkQ8EN4IzBGRGzTEgCf
-Zeesc9iV2ZW4iQoZJfmBZMbEEQIAniWeb4lkdZtHET7eiagGL39yadLgiEYEEBEC
-AAYFAkD70pMACgkQ2djNiH7XAVpFAQCfSh+vvOKKmeaLCK5C/zhNEnN5v1oAoNWk
-GAL8tdcm2hDA9y5gjxDbS8ZJiEYEEBECAAYFAkD70tsACgkQD2nEhndV+jKTAQCg
-jDtuBxvXrgiORnGMKyQ5rh6a3gYAn3OFOzk8Ha+zzGRs0XE/CJHiBJjgiEYEExEC
-AAYFAkAs+7QACgkQObpiqACbv5wbhgCfT4QCbJgcgW5KfNe/lr1vfcw66mEAoKxS
-Zbn/zfG9L9MT5XBoaBH/wjMMiEYEExECAAYFAkCt0lYACgkQuQxgFMZGnX537QCf
-Xjt7e7C9jW6FEkwySjCS121+ta0AoKtFxSttRsDZ/kdYGKc2W0rxgSBgiEYEEhEC
-AAYFAkCfeZ8ACgkQXtn1Qb6VBHK7GgCdGdi8XMwy2OCZqMDv+dFEF5tfIccAnjD9
-PQyEH0OnGVRGsvDVGvhyAHBbiQEcBBMBAgAGBQI/4bfiAAoJEAuerLG7SymnfykH
-/RE17RFviRCEHqFgvJ4/e6xlbi2L4/cxrUYcj8i1WIoyCIlaPj0K7vIQmMhBtXz5
-twVcu4Pcj0JQjEoufSdQt6K3s015taPI0iVmFTb/Yqs9t4M34Jr+y6N89h9gM+fv
-bYTEVBRBJDMoky2DIKrCQK7y6iEAfOBXEDMko52gB2T3xuqsJ+DAZPy7I+2dtD6u
-+JzwqZPpXzkAmf/oCSURMzzJYLfjk2DxTtj3oIlnjcopMq9ktiD5owHlJzKOTX+8
-nCxgn2JpL+696UD2mkz5RlqhYbntwYy3x90NRhoPwJw47YWj1DH7s+IXmROAL5Uj
-/y5yPbIYmHEOmZ0lnnia8cyIRgQTEQIABgUCQVxx4AAKCRAr/dPXH00CzNwOAJ9F
-391aH29vX5z7j79SpjCbT4oQ9gCeM3lScSLaNFhUqK6rMUcEHxmdOQaIRgQSEQIA
-BgUCQWdzSgAKCRDKWj1eAN9UBNw8AJ9nmV839wWRfOGqsoKd2idN95+5oACgvWUJ
-TtuyoAC4zeGvkaFG/4pPGfaIRgQSEQIABgUCPgB7eAAKCRCZyJwbUBf1gn8wAJ9C
-JGM+HgxDwgUejc4GeeIVsTHZZACdEeCQo9GS4Pr56l688ZI3NVbQXtKIRgQQEQIA
-BgUCPHmB7AAKCRChQCvQv/75e4kLAKDIYBIHxkzY+gBYx33eCMzcpVpmUQCgjlTU
-I2r0uohZRpw3fFlFzJHp6V+IRgQQEQIABgUCQZXrvwAKCRDzVO+qQYDDG0iGAJ0d
-IYF5xpH6NIXXGchxOC4NYdxoNQCgl+p3tPjlV3pYj1sp8kQ8zvdEPxCIRgQQEQIA
-BgUCQZ8mHAAKCRCbtIk/plAMh+DuAJwMx7sUhKgfHgyl/1WbtgCI3lJtqACfVBUZ
-bDENYhGxUnv+zoScZNwsXTGIRgQQEQIABgUCQdaR6wAKCRCaeTVp++u8F1Q9AJ9Z
-p2xKJ+Ms5pAROhmjP9rQs7FHggCfQ/KU3Z5hRIV9jJK+2DACoxXzQ/2IRgQSEQIA
-BgUCQeKNYgAKCRDmB9YqXOwnSiDJAKCOlXcPkBr4nDg93NioihH30MLenQCgo45H
-7bHjNXOeG0WiFXDSUbTkaXKIRgQSEQIABgUCQe+yMgAKCRCNLV1loW4hQHQBAJ9/
-dEV46EOObFPf4V2e3k5MQXzevgCfdGlPX4QQ8/HqbybOdjkqipbjFrqIRgQTEQIA
-BgUCQfaeGAAKCRD1tHjGZqmlENG1AKCOA1ihd9F+7TpCEiYrJjpBNCc2JgCeJX+J
-0Ehexeje7GU2+uESKt/vFyeIRgQTEQIABgUCQFfsAgAKCRBm79vIzYL9PmGLAJ4s
-gNjXv7k8k2vOeMnODYbz7vZuAQCfW2WZdFw8kAUvKHKZihA7Cm5gBo6IRgQQEQIA
-BgUCOynWNwAKCRDnSx1i3y0I01a3AJ9oUUI0E//xY68cjlVPK86sSDKvYgCfWDba
-fFMDov4UdFHVcBa2UABEy6WIRgQREQIABgUCQdG/OgAKCRD4g3nYwLiUMP+MAJ9q
-1P44kEUC0AZjkn+v2Cv+sVLRFgCeLICM7HIvwK4B5utE3XlqDwK+wcmISQQwEQIA
-CQUCQdG/jgIdAAAKCRD4g3nYwLiUMKlsAJ9AAdgP8n1dDwBjzwvE4ICaVlrrhgCe
-LcqHoKq8RJPZ+pcklLZ9+kj3Fl+IRgQTEQIABgUCQTgEkAAKCRAczfFVIa/YfYzs
-AJ9JHrQkUo/wavwQ8hpaWTDms38CVQCff9eYaj3OFhXZPQwSxukfoFNwkbmIRgQQ
-EQIABgUCQiHngwAKCRBaveulSVOofqV/AJ4hp1foPOCKSkOdV7Z0XjQYf7mePQCg
-45mRcZ86Um/ryEhIs0JL5Cx5EKuIRgQQEQIABgUCQgPmhgAKCRAczfFVIa/Yfel5
-AJ0TXWn2qtZmEN6tdZ4/VTpH9cOXVwCaAyui/mmmPPCEQcaC2DoC4rHMMqKIRgQQ
-EQIABgUCQh+4vgAKCRA+pv3jVaQOy4I5AJoCCMts0wQQx2Wnuz6Ii+ieu1MMmgCg
-h2H34O+pBg1dHC3GWZiyq9aDFjmIRgQQEQIABgUCQivBWwAKCRDmG6SJFeu5qyoE
-AKC/xmhCmG8fohMkHi1YRgzJm2VDzQCgviLROOstWwPujI4c/6U1MvAMJFOIRgQQ
-EQIABgUCQixCvwAKCRCNlVuZtDxY/syWAJ9psKJlkwGX+25fQIloLre0prFKMACe
-JNyKzd93pqAL2p2FyxmjEExSxbqIRgQQEQIABgUCQj2X3wAKCRDJFnZ6z2bubsFH
-AJ9X+MXZ+Wo9+BI6Jxv7DohpqRORiACgv2ZG6oESd26tbmu6d4nFvLC/6kGIRgQQ
-EQIABgUCPDXcNQAKCRA1vkGfu3Ky8uAPAKD+CZ8+0LJbw4+FyqYjX88HRITy2ACg
-3P/GI5yz2bcm9C68jTGQRDETkH6ISQQSEQIACQUCQGhfgQIHAAAKCRBlYvyF8V1B
-6e5rAJ4u2LkeWRfx0H4lO8Kw8qa15B93bQCcCDAOzq5Tm32u2/ys+xv79W9GiqWI
-RgQTEQIABgUCP/ry5QAKCRCnsnSlE6sGY2F9AJ40DgNKAqjTwJ39As3Ay0K4niv9
-cgCfS5xlN79+zwD/DrVmiAdBufOXdT2IRgQQEQIABgUCQoiLAgAKCRAzj7twz+FT
-RO7AAJ9/1EEkLaNtp5NsNLtFEcOtyu1NrACeKc4ysw08YOihE+1K9o2o+NmJPm6I
-RgQQEQIABgUCQphzyQAKCRBNtucbgGGoMOQOAKDlpdJ662o8GZMZcAa5Bn9zBLNm
-3QCg3hSOMJN3qCx7u4WH5unoMBNvrp6IRQQQEQIABgUCQrV83QAKCRDPHxWT14OS
-WtAQAJUSmIyDG5c/S65bzlVAXCQwk+kRAKDFzx4OT3lH4+ShLFubSNWHkiiCaYkC
-HAQQAQIABgUCQrq8/AAKCRDpsJZTSEc0NkA5D/0VdiADaGEwgFtJOFp6cL5i2BdB
-Acq+aPzsYn0/Z7PwE+sMieHACJMlmpUYUpY7ly84KqWZ8YEsFpqK7GXht0yMyPFZ
-DaAUU3QjasDPbEO2XoFwmNG9rahTzqm30bGASp2nAkVy0tcJ+ajWxt8HkYgdx/L3
-Up/qU+esvIIx51LS3rLPMKYyb9TnK00+75nbsHdHWNdwEeg9DiaQl2NyewGsCy8T
-nuGDI7xtnde/jXdkO2z96pNfIC8MY6VlvZ8/F3NKAA6c3SMd10cUEd+vwosUwIPC
-SrprjtlFr55zKeBoookpk5uDlN53A28vCtLp2LEUJmiOZfu7sCKTW2Ag8kOp79Ui
-mulPNcCBX5Ga9xem2BSL6PetbRJqI3FT9XLl77Ajy4StxwPXG+TWq7RRtRrSgzep
-TRdSgnluy5a0nwplZh9koG4qjCOGonav4YLWY9xN8Na7fdFLN0+hIq3akxksDrnE
-A5FDQBxMKZE+2Z+Oy1wXm0jxRSlj5xvTNeFCEAZfxTr5MjeEc4tdQ+nJ6hcSI3QF
-LpkAPWwV9/zAUDcFCxcor246U4OJ5eFy+qK2m1n/c+nGqtnGhwgiHEidMcdRf1SY
-9cpVw2y/yrxuFBnrA6gL7E1Hkd+q1ZKBpNOMLRehuZLDZ9smezC0559zm83m/mWu
-+3609YuwpR3nDEradohGBBARAgAGBQJCuw7OAAoJEEmc1wj9vBgJHCkAoL3NsMJT
-fulYHNlIq9gdlT2T3ZZtAJ936ffVtCMBMAQCXcukL7SE72NBsohGBBIRAgAGBQJB
-O5QZAAoJEE5gbDa1mq/YqsIAoIbXf+2oLq49JEg/e8IykF1AzgXvAJ9oE0QmEF/q
-IjcsWsgujAQSKttYUohGBBIRAgAGBQJBdVr6AAoJELMQKtJN6F74JlgAn1Ex3TC8
-sXrcb2mbgXcXRCLYKTO1AJwLfOTIuGEttlQsh075hrvMvNYMDohJBDARAgAJBQJD
-NGqmAh0AAAoJEPTmanzCDfJzgywAn0HxMKr2vnxhWx0kOTiA82AIWJdwAJ46K1GD
-g+G2foONZksTmv584Q/VBohGBBARAgAGBQI+Kch3AAoJEIPQlUhkteA3VjoAoLe7
-eWCzTt38QRqWO9Z6hRQo3lgOAJ4qYIlorPixGSowm4rKsFe0eHFZeohGBBARAgAG
-BQJB5FtWAAoJEDn85ni80AmkqEUAoN6+juKQPddMzybnFP7+NiQ1TM3MAKDd0/NG
-LetZbXLrIzO/XUCkVT8vWYhaBDARAgAaBQJDV0E0Ex0Ac2lnbmVkIGJ5IG1pc3Rh
-a2UACgkQ9OZqfMIN8nPSJgCgrb3VZccTTqhUcrWlKx0sT3SsaGoAmgMwoNZHR+6c
-/mM4Jg8NBEjBwLG9iQEiBBABAgAMBQJCG4wqBQMAEnUAAAoJEJcQuJvKV618dO0I
-AI12hMFBAPnydtJfZsDDoEsm9YTbMCOipBBFIs9tsH+QCfrsIStRF3zPaNhA9lYm
-LWzIBD4BmrxwaGcZ0ofLI49RHHFt7a5AlDQ0tYuB1eudVSe8PmvqlyWrkmEE11DC
-1qtZY/5dQovyKs5XhZoz/kAueAxcldI9TD6NRpL4SglOdkHukfqeVaom5CMqnP5C
-vIcehKkRJz9qcONai4HjI+LTtveazXWmCsE3UqK/46OtQU+jFwdqDF0UM9UFkTDW
-0Ox+gpltpCcWsNqSzl6CUu4tlzvrahvRtzQbEsore5ZOL1eohDPFkrzrWztebfZ5
-nqtzeRXzNjTYp/2SOwE9eUmI3AQQAQIABgUCQ1EWfQAKCRDR2VIECemh1aBgBf44
-o92QDDYfzau08virTOHy8tCVosgnP772D7blNDb6YURBmRiXdN4EBor/d59tL/m9
-oRA48Ub13g0Jsg9Chj9AK+fV8FuJYjc5FvKPcKi8GabblwxR2O9+/2l7O+EzNYtm
-Con0CUYFYuHUlpg7bayt6pH/c26uwfFOWmpb5rsHdHF5HHQiuIChHMW0Ag9nahgV
-OA4buk0GTv/KoZtdr8AJHdf2fheoEeLRQlQHbjR5In3Qe9wNIa8daBwC991c11qJ
-AhwEEAECAAYFAkO8DmEACgkQ/0rfvFdiXfqlzQ//Zq1dMcLOfXeFOIPdvbeFTwOi
-icSfiGVxuPhMvDBgLhhABWB/3kUt9GA1OGK8XPBdXZVPy9NEps3W+7FwCL6e/fCM
-ejeiscdA+ORJ03h0EnFmU/+LsarZmPrxda5SEmdjsipwl/Z2uiH2e7uulPzxlkss
-qJ0Ibd7rgn0ZFpYrt3Sp8Ml2x14evBxVzBDcL4iSghKmXqL/TyYFgNtmVe4XA7/N
-gJAx1gdWY/vlPbxht9tu/g84lPZh68z/u/K1v3umqUx3FPkQho2jH73nZ6UEyG/h
-RbLmWA6fiorng975XRiU3NkT4Upp4MB8JZIP7LjuBZ2NY8CWqFsRDyEYtW0KWdaT
-7pDxn2a/C6f/Cm2rPp5d5VZNfW+2kGErhrR//WfnDoh8IwPQVZ4DdWX9FQ6wvcQY
-fV7QtVBT//rW3pkHoTOPViHlh6Uxfw+ojVH8d8Bjl50FL0e7cqI+mqOUuXoPfwAM
-eqB3zJpdDx7VgNhFx3pjCbkWKAVc1cQxhx/V74HKJZhBh2+JQMSkJWn+EAR6UEvv
-1DZINoFsRA1iQdyW6AWxWoVyLNFir07UEbLscyC/ZZGNoB1F9HxBEVuEyrVnyWV/
-6bSpfPvZYHsLQnIa5+aUNyd+AMuwIZmM9cfUYSP1PIsAUjv53AC7oiHbX2hxYqJa
-OYrsKe7CWXf/z64zbO+IRgQREQIABgUCQ72JVQAKCRBozYDOqcHxkEGUAKCpD9jY
-Y+vxDlnAHZs2tIlp5Ru3IQCdGGbUd60CK1eWcQfaxfeASuAxhFaIRgQQEQIABgUC
-Q8L3AAAKCRBaSGhqmjqZdS/pAKCZbST5woymej5+lO38bS4NljyG6wCcDt8GOZ/i
-MqhKp9BfDNP+XM3Vjc2IRgQQEQIABgUCQ/5DPQAKCRAhmpdyhocbsyfFAJ93j4Tc
-YRnBrfBymnTHNnqcOYlI2ACeOUOz2IIx9Wjk4DxzWlLua8oT8viIRgQQEQIABgUC
-RDUf0QAKCRD2v2ZeweIXyuHgAJ9PiGse3O/nOpUir4Y9k8JEN0oH8ACgkJ8uG9Oc
-3/fP3qr+qzJqUQVu0XSIRgQQEQIABgUCRDZ8MgAKCRAWsHVNB6a3bbhYAJ0b1/Jx
-c+BkraUc7CIjQ5LtxQckqACbBvJPJ35NmNi/cPGy0HtpNGjX3riIRgQQEQIABgUC
-RFPbyAAKCRDgn+8l2WSErDBTAJ9CL0DzKuzilTyQYx53KU6o0lTQYwCeMQric6e9
-ZTdb84VHfgTqO5lXNmKJASIEEAECAAwFAkI91xUFAwASdQAACgkQlxC4m8pXrXy/
-QAf/coFzCKoyqe+aQj3bef1w0EkWDMU6VtNfQP55m7QZjEldUCn+4kWaERJAsMzn
-GTijPOA7NFo+Au+kZ8iJBmwOoqn7cUbZEet0YoCyh4S7zz4Dwo/8cqMV1B/FoaOS
-kTf5HxRr3s90T2bs+DHn4etBG/kFgLwAmltdBobVw6O6B6aJV0u7e3vaUgfDv5DJ
-lMLJNT1JfEdlaolBfEfxAp3x7tqfaqncm0Y0TJKUtgS1uNDMxyVnZ4qCcwq1KlYP
-UgC+QIbOiRHCDcvvpHomEVc7zDnu8/uXnLWiDFjCf1gQkeuk1FCd/E3aRk8VN5Se
-2Dz1nSTlFAjrybW/BptDIBH5wokBIgQQAQIADAUCQk/JPQUDABJ1AAAKCRCXELib
-yletfK6mB/9zJP4FXUoH4v5ZAA1Tn1SJuMJuEIn4r2rVR4uFBRQJc0CTSxqVZVRK
-9ydBJbPFQDi6jwR0oXDoYraXNPX1XXK6Uulv+BbO3KQ7b8YYZ3EfRQU8lLppog8U
-CC3PzFY9CjE4sy5z3IDzZkCPrXyUbWea3wr21kPcVZYv8esCrsgohMxNBztGVlij
-aA9A9OMahknvS3qN9zXLHQY5UmTzeqa5s9RvadOKHUl9Hu0ssYHMPdol6LqPWvYe
-AGp3ebJop3nOpOkFDuaikl1kThevCJ12sxWnRh7TeQCNwxznMwbpqM9i24ssH9sk
-iOGqskPSvoD/6YCb3if2EEJkX3oDPY4YiQEiBBABAgAMBQJCYi/IBQMAEnUAAAoJ
-EJcQuJvKV618DgAH/3VwkhJ1YxyyTdOciV5OO7/43GTH9W5H78wvxfeBy+a++Qyk
-4W6yx3CwrlemZLqFuTt5ZV8TUDoPCCIx1PTpAISpST0VbDyD0Dk8of0kj+1ZS6zM
-zQVIft6kItibPBOuYfER2RmV2OTLUPoKHxhDvyO43mOAX4MpwRj2HzlswGQNKrAK
-pOOBJONo5As6+b5DOPHT/tdNkuKpdG0wYw4lgGURO8bU00btinam8KghGnEj44C5
-yYo4mUTU0J7NtUAbg2p98PnCI5eocM+HC1bCuibYUyBZ0oT/F1K4zC6OUuxo0F17
-mTrMsRKttRkSLm0bb+jTtQi3WBdIjstFSZ2+P0qJASIEEAECAAwFAkJiuMsFAwAS
-dQAACgkQlxC4m8pXrXw/hggAv/mMEXuuoyvdkoTs8JGC8EBzHy7p6E3JT02LmYQm
-SoSOKFp6sBVtJglfnDd3QrA5V+SdRepaINZ2Sux3k4x1SE2GcMGfI6bv/nGUCFpr
-3Piap3jGuarqGH2zRiV4xMohe2mLURAVuYjG8vZ9alrZ2XfnETy5/DRjhCEfjhP7
-iPmwZXuSQFFW5+wJxr0pQ3NeoIR16hNfcCry+mLrvTJB0DgbdUGRtihzmFagHbCZ
-rMHmo0UE76D22H/IkQu9L8GMZ9t4niAijc/LoOfwB7lLX/Rje8mXo9VzrdsnvHhT
-5pjsoCYWI+owGIc0WyNkDoeNpVPfcyNqekAwdtyFAFmwIokBIgQQAQIADAUCQmMJ
-YwUDABJ1AAAKCRCXELibyletfE1VB/9Wa+SGPPSLNXCqT/lnHh7r2lvwT/CGc+w5
-D+iN3ria9wbFe1kXUL3DaPoX+65lRY8oaiEjdfOGfosgU0xd/WxOfSuB3dIs8v2k
-zTZKBfxuks1cY5fOt2k4jY47U961GeqFfRgELWQtZnJvTmog+7wN61xQgvoLSfRb
-QQJBT6NLvCQbE0Rbga3LajdX6G//hqHzN0FJ4D/hUOJnd2evSP81sBwW/O1IJYxT
-Zjgw4YMipgBL4yEmVpMKs51TJxVRAOPgW5lqPzTnFUt6aaFSZDwSm0mQGu8blDqk
-v2Q86p17jvDlbjzQEyUCqOiyVjfP3NmW0avn4wMnDP7ENVzWL4FeiQEiBBABAgAM
-BQJCdUVCBQMAEnUAAAoJEJcQuJvKV618/EMH/0/rvB1pN/nB+iLY9g+Ed+ZgMcSh
-mG5q7PCv7c36NA1zeYomSIRmDJhddOTAcEwZqqfrWCWNvNyA/WcX/D72EkmUWX95
-NkhE2W3SvHOhq1dpfKlRqXQCsJ380NbGhfLghKdnuZdnrjcXnL+9hR7iDCreqtU9
-l3l2W3dQE8iWXt31Z5eCd3CEyREpGYg67fpopt3gVvUrxckgJd6XJA9jqVJeMk6F
-GSBLzuUA418cbgaz3hUnvpvGALKd+jCgu0spV259xyQeQ2M3hIsJUNEYemegMkS4
-d0mwuZwdxyrbMZO7W7yXgCrL9MN7dx/trYx9RMeC3o24NWrXLpe3M4a7kmqJASIE
-EAECAAwFAkKHFwsFAwASdQAACgkQlxC4m8pXrXw6LAgAwtcTMqqLIgOyGZXaiH5f
-bCQ+dBQHx0wMVB3LBeLVF3Shr5JDoe5xrSyeJ40qZFjOgvFa0Sia2j0ZTzI3pvJu
-3sbjEAcXSBJ2LEk8+dg4E9xxaCRBGYwwhY03JTkwcn10fSZ7jD2ytvKbkx4u2mQv
-oWIeE9b5GwOZkFPWJnXGeOMMSDvSkxCn5wXNHrYY+PQJYA9BmUVlnkk8hbgWKKFi
-m2OdurNB6v3DoODZXOYvExINIXaU05MP+ZU57cyfjgaGxteIfolt3kwDkNGherKQ
-KhBl7sBu1FM2JPtU6zwUN2+vaw/TT0/npI5xXKumITqds8b/mVQKHGlbEzrIN0IC
-nIkBIgQQAQIADAUCQom9zAUDABJ1AAAKCRCXELibyletfKyWB/9ezlx19q/I1Ge2
-bACeVZLvNNdrGg/LkuDAAp1LwIyMDQqr+5Uf08kpn+O5SfULah5xTTIWsyHTkBux
-wr+o24J8WKYjm+0ols+ykMD0YsSdL8lhs52cvXUG6dyeti+m79o4kjnKlkhJ4Fjn
-I373rZIzdpdKJpo/JAEIQ6ZNmuDRfHJnAtQFmH/qolWbBSbT94/m5ayL1aGHVUF0
-6ZKD7gxl0V911sK7Bccc3869KBPIJV1IgYNq8h23QeZaNyxMqHfMQJcxFc/4BbVy
-/lYsdMLGKGMmOCxMaz/fzuiP0FS08VDkm1irTOvp8JtiHxcOaDnHuLS9rqrH6w1X
-SZJcH2M/iQEiBBABAgAMBQJCnNyMBQMAEnUAAAoJEJcQuJvKV618CSkH/jPcaZAr
-kwmHS4TeH93NaBHMQYhdt9L4eHH+t1jmkyhSpnc4wf0/Sbf7+jCMWlAak3yVwAIQ
-Y+eML2lpLZiyWCCr0iH+AE9jsv4tawgq4IiEUuD5lesYwCZTYKw1lI50LtbKRS76
-Jz9AeaaODQqCMswRzGvzVGmCzzGDCWvLa7Sp3/WP+qJvzc9rclwB7zlS20ycyOup
-eeN2jZ77etTJ2PC4WTC/XU5g9Oygk26MBfI8ypvH1ORYAoAa4J8oSoxcFfPA26WU
-9JjBS89In9iChpRobygELx+SsZ9RuTfab3TVcM7UiTr5itzPpgGQs2ZJOo17CTzR
-UJKSmc59KK3NDsSJASIEEAECAAwFAkK3P2EFAwASdQAACgkQlxC4m8pXrXwrOwf9
-FBK8jl4oTqMGZHN2ST7obZbMyEzoh0r+kbulKXCfRjTxIHk8U5qcgPOTRzTcoOlF
-HiTOIeQ+phTVHhgrP37zWBZ0FzMnhlaQ0WeHSvub+DKNWvWKVx9XPyNtShwiXy36
-qBhdW/VNL6DwdhdiEOWwtyBrnBYgHS7qNeK7wE7gBLTA7jDfEXTOXF3CxhsFFAYs
-G/9sa6D2WWeXzvT+rPpi45v0lJ+36AEkZOk/3N2HJXW1Z7yIYTdvw7YBmrFEQVsE
-HBXDeNdLmKKr5Xp643Zl2VlYT0o54qmcBwfny0lZ8vCtg2XKoCUZux8MybKI2Muu
-QQtdFrWgKFkm2Sj4AeyUqIkBIgQQAQIADAUCQtECogUDABJ1AAAKCRCXELibylet
-fCgeB/9y5Hg7fRGSTPAOxnAeaNWAE8z2Mi1T7Kxi7vWeo29P5sdYvlXrBphB0Wln
-820CFLtJPwOcSraNh4iQM8rYegP/NuyZWRN70RhgiYvotkBVFcSDSRXiUQsvQ1rn
-nkVf2gqVxmniZThkNaak4djvDHn36weiDqepE0eacLcnf9Id1Iscelr4kOapMAwG
-esMpYCOvvCErC5oFP48jaOFRXLIdJkJxIpNu7JVWcsXg9XFUZ4ybQdgYAcpv6VF4
-hxOXjF2J432JsOGwG0jD97a6xlXDQvR80STXo7qEWYnyQderXzkq92QnVyLkEAWb
-vr5ChTkonXfmmpBjKB8C2kcOgC6aiQEiBBABAgAMBQJC0aewBQMAEnUAAAoJEJcQ
-uJvKV618YDwH/j86Nd+fHdaQ477vu4NjyEHIUDMyV8ijZq7WS9Px+1ouH+AcRz7E
-Ph/6c+9QXGHiQJIVKCnuES+l7mD9An3hLF8VDq7YU7+MirhCFogOi3VLTSqJ8Rz5
-O2uPI/4ku7T0eOcGP6N5ZOIzZCf5Cz+AYVTxZ4H80Jc1djXZ37kqv3QmGszvDE3U
-zNegKaO/vi9kPjV330kLIlb+oLn8USR7IuWnD90obEuHv7FzmoWylTAlLvVZnJEx
-ZI1nnn0fDqArydrd5cvXhxti7vMsrY2eIGMhou16mEn/NzFybwcYxwzDf/TvzCrr
-x5xRipD1rCbEJhx7T4dZAjSPpBPOPvQPEUqJASIEEAECAAwFAkLjcf0FAwASdQAA
-CgkQlxC4m8pXrXykrQf+LKgdS2E3BY0WMGjvpcW9+Ve0XQ42Pd3fyW1uG0MzPpJp
-6Wt6dlMAZCGTMveOwNH7i4/w/P8DVb3wQcPSQb1Clx+u77x9sXJP/EX90DYfieYs
-DCymbBw3aI7QUhF9pn7bWalN+3o2zIGOSOBKczvjCoFuiMUmU8QMX1O4mtOJlpV8
-QVwRTp33KwOkt/SJ3ddlCWNYerdssSictQsejFGCDZynkhcoRmWMD4YfqDiaA6lj
-8Obtx+hBgq/JtnN76oqWkOAWGhevtANulP4O/n6qwOSdVscXgTrByE0PejM1AMul
-6AZ0KhjTTN9VLE7mlpPwHIHjASv9cq0Q8fDisjP0B4kBIgQQAQIADAUCQuyvVQUD
-ABJ1AAAKCRCXELibyletfE/DCADDXiXcqp5tdGGgGz1nqDHzNNytNwr/JGnpkraQ
-2QgpMdVM73mb27ZJ/KuZXLJBHXvnunG2OrZIZQXBz5lzOL+v6DTNZxLHU0uAH3Y5
-vaNbGasnhPKGdbX5o7vt/4+1PgK8YZ/nPZefqrIbmdRkuY1oo+YJB6mNy3dLiNM+
-kk+gVM/v6CO9XXTpzUdzXzvtd+qRp+MKaV3jkxwEBONN3ulkv1/LfrWNRq8fJ+Vp
-cFWYFtrZG71RhtL3DjpkAolZwd6BsQFguozT35iI+bYJS5WQfjIjZm8lJAokmWFb
-bx9DjNxUJlABbCAUhdlRoepemGiWV7Dtq1I0uBQfz1t6TGggiQEiBBABAgAMBQJC
-7VYoBQMAEnUAAAoJEJcQuJvKV618eNQIALQMXuHfk4Zces6KkEp9l1kRRaaRqbCX
-n3c9q6OHLgKl17ix+cZonAoDoT0k8PavbBzqJjhwg1oJqIayCCFGxs/kpG9MoT17
-KdIf/GV9rTYm860PGmpBMD0vP8EMEunmnfLYGtfytYFJLcfQTEnKR0hqbiOjFVtQ
-ZOoiJVA7NwkSjUQ5KFAGcohvVHOiV0Lr2sh+NTfjA9lNAwlZ3SKSGDVCPlOsjvOA
-S47wNRUdKSGeQ5oU5Hb/dCFGthdg7wdkMBWU4Es0hsFpadFcAekY/hgM9RzC07Zv
-xaAnpfmTwQRnFqoTjifngVCYvgYl7Q0xjfvwQ71431Ffw1sfk0xR2fGIRgQSEQIA
-BgUCRGyV6gAKCRDEEBZI0RNaSxgqAKCmWb0sKQMtHY3Nan5AYlt6MLyLhgCgwWi2
-Vf3ccVN6L6kqdML+eoozpK+IRgQSEQIABgUCRQxbaAAKCRBpCnQ6XgPXFgZmAKDV
-1+RCLFlkutFJfQ/b0UldOLLVwACgg+FccWA1xmaiiBK0lVubxbKW30+IRgQQEQIA
-BgUCRttuuQAKCRCp4hl9WkBdjGcpAJ0Q9wrJJaHsjbFXID9Jqz2snl9MVACfaTRk
-A80gwpF67x6LZ6sE4tOdW2+IRgQQEQIABgUCQ0p5OwAKCRDvnrfDDR3ibipeAJ0Y
-7u9S/+Y5FKMVd2niut4+EgP+dwCgsIHu40owlB7g5F+E/AGHmkqdgImIRgQQEQIA
-BgUCQ/yEwgAKCRAeUgMMcIt1wz+oAJ9HFE6JWpIynU0ALQNKTBZ14QWcvwCeO+WV
-JpWrDwnuPzenFw9TKRxVufGIRgQQEQIABgUCRBrvpwAKCRAhC/Krn3HUScK6AJ9K
-LP3uzhU9U4dMpo9E3onxljn1CQCfY6izVeKNEWW/8z5W+plE2aOhvKKIRgQQEQIA
-BgUCRHakhgAKCRDtdSVsHuUfaaLAAKCV9YoXAqj9oDdKXelQMeuNtn9MhgCfXJ9m
-rzwaZggGx/PbmpiA0EZ1nEKIRgQQEQIABgUCRJjmTQAKCRD7SeY4FPTib6iuAJ9i
-BZryTLGNGClkqy/S1FRbKc/ORwCfYE8L4PBDG3JaPSItbJ5OH/zVIveIRgQQEQIA
-BgUCROE3AgAKCRBwiVW29xHmXrt6AKDq7q8HFdgDC1kTOPJqYZxnjojaHwCeIaPP
-H67iYfEiciv8R8A6GpOVutuIRgQQEQIABgUCRP2b0QAKCRD79V+u1gLw/8Z3AJ9z
-Eq0UkL/TkPWs3sdSBqxgfqEclgCfcz0IuykflGrYTsEAcj31ttg+NmaIRgQQEQIA
-BgUCRVvQtgAKCRDPN7v0phZJI1BgAJ9sLtNxl/5sAwmhIBSQOChw4kj3nQCggFfm
-G4vTiGhuxOiv9gGVKSct3feIRgQQEQIABgUCRWtdSgAKCRC+wlealWpazyiJAJ4q
-eISXI94tUQrBwwQeln/LGmHDPQCeOQ0c/+Se1uVS/CNVV2MxOApbVZiIRgQQEQIA
-BgUCRZNxpAAKCRD9+FRXs92MMsoyAJ903ypivSi8liv0DzMZBvW3MNi+RwCgs/36
-w9kyMSxd0kGEKrz2DPGPJWqIRgQQEQIABgUCRceEMwAKCRDLQTAEm+Qcg8eNAKCH
-TnCetq8UKwjT24qm4iK/aCRYQACgo1QER5aKXQ+xn8P25TfMcVbLcKqIRgQQEQIA
-BgUCRivC7gAKCRCpoY/MCyX3glmUAJwKUUkw8/jySmS/VO6kq9nNAxfMJgCfZlv3
-iTuDGd+4lTIZnYLOZuL0veKIRgQQEQIABgUCRmhsGwAKCRBeQmLvNg3sg0pBAJ9D
-fmGyxZcqlwLwI5J8lvjz0LkywgCeKZ+q04zviWIo+m0CHRJw3d1y2GWISQQwEQIA
-CQUCQ/yGbQIdAAAKCRAeUgMMcIt1wys+AJ9kKsm5/b8S4PXpvPbUy+23HoCPUgCg
-jiXcOlISMO4wbj2nr47zpGlhzTOISgQQEQIACgUCRkhSZAMFAXgACgkQwkoPY342
-SFZUVgCeOOu4gFfQJ6fw2UnLUe6MqGxQ7zAAn1yRygbmdx6vlEX8EODtO1Ax/btY
-iGAEMBECACAFAkJ3OEIZHQBTaG91bGQgaGF2ZSBiZWVuIGxvY2FsLgAKCRA5yiv0
-PWqKX7ERAJ4+hfOYx+yBvJzD8qtawcylltJMkQCcDxJQGod8n41mszmRsk/oyn0A
-ZWiJAh8EMAECAAkFAkXm7KYCHQAACgkQ/0rfvFdiXfo37A/6A+ci1hreJiLkZWkh
-FOHoesFz5/80hmD7HIDfNZKL3+dLSRMErZsuSYrdfbEPRO2cRD5uhL2jds4XdhYC
-wz6amtL38MPivFwVt5ZmnBKO//nHnFcpQjbBxQELYvN3gLzWguN4OYs4MpKioscs
-9WyNePaSjWOJvBZs+7xglGAuHrIyS7ti0O9l9bcldGKgrrixyY4enfGPqtGGJlSY
-eqP2eNo1sTA91Nr1KHtx37IJ/1QRl25bsz1ejgaXceN+XKLMX9zTBls3ly8yQXgb
-Hlf8wGZVBWnPq13gGNN1H9l4hJiulI8akV/adk69PzsOdlTvRTNJdwUyIFUvPcW0
-vV17pCudT0AX3ZemPsHGoDeeQMgcNhPP9bSjojc57atEiUcOoK6ZD82EGPLBjtes
-183DrS4AEiczHmEjC4uDnluVT2H841CB6HGHAvsxW00lSDRmZQz94+ZSaee1142+
-p8QZJeoWpem0MFIlkbaFQL+k23cfRTvvfVVgE+4XTU14/TgDC7lNKRdf8IBKJkWB
-Fa3aPn7JBC7hKqQ6RgofObzHt8RqBfvD09+C2LRkrPqgVmYyylWWWz7pcahNM+jl
-CtoCim+jGavWVfYAzHjBjBkSj8XUR9mZcorBpf/1eywTQ8QuVWSrN50eRqi5YlMv
-Jv0qcAi4PqsMUn+N2x+Fqq3VFh2IRgQQEQIABgUCRzjMIQAKCRDAHykL4aAyrN9Z
-AKCu2Ea2rHIlKiYCT02b3dAaPjdAlQCgxXL9AIHWejbZE5J3uAruFq5sjNaIRgQQ
-EQIABgUCRiI3zAAKCRCFlDgaMpUfXREcAJ90tMSivsfnKRK+BwFmb4ivo+ixlQCg
-n8R3oXTcQTsdeRuA41UWomdI6KyIRgQQEQIABgUCRqpdUAAKCRCy+wRQJ9CQpk/F
-AJ9Mv5Ozx/L4o1UthBfB1NhD8NpSrwCffzAYzzbqV+rxJRtI6JIkDuJ0h4qIRgQQ
-EQIABgUCR48diAAKCRBRr0V+2m3VMiZqAJ9HtrbsqsYHjdPwC9SkdOpl2ZYR0gCg
-kqWdGBYiK/LQG0YDrQMrueeWVBaIRgQQEQIABgUCR6Ff/AAKCRA2Zog6U2NQnCPd
-AJ4sDR3nawaL/UmnSm8ljgGkkkNUFQCglKTGXUbgbREONKjqAw0RwoxADHC5BA0E
-OeKvnBAQAI0Zm7cWpnGfRGLEE50L1S9per2OUZ23NSQ3XMhxFdZB+TsGVqr3JC+E
-ICigW00MMgfaQqNJxiuD5AW68C9KLrXx0OY5fQ1M0yiLvmrFlMn938X1Feo9gOgT
-KOfSjZyc1MidvgeZ2ujTfJxKhLZqIETrY0/C5Dr2MKg+AP+a3tr8LYnvoeclOgPi
-W8hkBepRcU4WY+FhnLQYAnAwakZDwBvAxRQHXjeUjAAK5LQmLlBl4tqvPBA6DrN2
-JMNJAfPmRRAs/+P2/d7KOVrdoQteXMQCaFxX0d5lx5uNBB8zHRLm5VleG5Qdirxa
-HEAu+Px++ED1JHFpXOhIT6HSr3Ljn04pg8V2ohjRTjVD4DQJgl3laDmqXVqW98e4
-jhz8Eml3iUqzvX3OBrVYw7/dA09npCu3L2JPLOgd7e6e1ttV4voUYK8OtwdxxJa9
-NYHTGWOVwOXsv1pp8gUuizsV5Z1QRPtrkkPmmR6qy6V3Eb6tmaJzs4g0hNIveNGb
-yMJUs59FOQUFpfYQQ0MO/EWnGKmhb5aXBoj4++W0nAeHnpsVelnaPZrStFa6/vW1
-knFxxL+3YneF/u7QVimwPDPoNvdDks43kARi7j5m48cxCh/Y3FLpswc8wuceRhYe
-zp/Uy10OCv6EU+kEKUvd2K01W0etBuDCrHj1iyZoOyIGFLTXSxo7AAMFD/9GgAKv
-AplraLq49V8TWKRZjgFOOXeuXz1lWnSshfvgfrg8kTWDex0Aqqye3XZ9LB+zgbZR
-PMmgWIEdExZcvffi6kDuK+CBgsNkwYAObSpzlqDxhM76y3qMUd0pz6ygZQ78eka/
-YdzifVFebvzS4Zgj/YjddIFghs7FmzjXOqg+j7oG07q+OA+rOAkSvcX5Ykq7Rtxg
-R1vdeFCoyXgfbHt6HNa5Jjb9/ZCnLt98OF1REwI+KqV58KCb8ut/XX750LdU1hYy
-LdtIj+5vo5E9wW+durieRriZeq/LJuTmGEbOXC4oL1M0WVYGJ446pUUQAsdFnIpy
-iueS4tj6uKnEVb0hOU695E1ExsXZstddxW7HK4S4XrBgC79x2+43nlgvI74hhGh6
-aYFLpker9IbCNa2dyvJAoxUQGFfDirXBHjKo6LgKxouXZ9ApZhuJcB+EhHagv00T
-VhhvolaVn+cgo/Zr7b1IS7ZekI1xy6yXUhsmn76kdREDCWnRbeFEb8dDO7amSHMD
-1J2/uznPifeqaHvxEJp5uqxZ06Vs9AW2OJmyQu71B+Cci7NJ5cYiqwY2jvQtQ0gC
-gaY6moGvHUHtWZCziNheyFhXildi2jJfmb731jBAjXKjVtn5Arj/Rac+SvsYNtU8
-KL1cvd2lioBS+tOoyudaEKd31vOsVyvWRimKkIhGBBgRAgAGBQI54q+cAAoJEMhr
-oGpRfQ8OTPUAoIYZh2pc1n/AdwFa4z67a0RwNb5YAJ92EZVOZdFeVtYl7x10AlBy
-BEqYJpkBogRIfaqlEQQAiqNyeVchgYAKXJuEHXay8qEt0sbfmlF9QkObfF/FHtrV
-8qRzkY810ciogXgHP8iLi0rchNOT9A1UE3un12cgyBmW8PBnOpHc0/sLVpkB0Gzy
-3F1d7E/+2Q20YNkoZkpXiVDYFgASyCjZPIihn4DCq8x4lHniys6+npX/oWGDTH8A
-oK0GpYEPwzyJVY45ib3Z2TmDfGQtA/9yRKADhURbZpLn0uateyTIy9F4wy+xwDSX
-Nf4UKAKejnJIV+k+6Nj59UCSCaEAEd34yKmF/0EJ+svktj60ba19H0ahzRh2P0Mo
-1oHX6iO5cOh+2u2u+ztrA9DH5C1qgFOj0KfMWBEBQ8HQ14IIJhVBIYpFlmzSVP6T
-YmpYWITcqQP8DwgJM+O/9+kWDy+/Z2WP6eoglvrX1LazOUZpE2eZalSsWlr/elqa
-R8KMqTFpOuoJJvxCufMymQEqb4dWuZ0Np4lh3Gs59k30X4AYAXxYsmhzYM2+ng8F
-nCDBxVYJZ4G1C+YcJgZFlZINhsUpDJNujghYa8RefSG5pH9mMUAV3pu0PWxpbnV4
-LWxpYnJlIChBbGV4YW5kcmUgT2xpdmEpIDxsaW51eC1saWJyZStseG9saXZhQGZz
-ZmxhLm9yZz6IYAQTEQIAIAUCSH2qpQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheA
-AAoJELy3z4d+fUenDbQAnRfRX+spKe1EMSpGyIgj5H2lXe/mAKCD6kx1IURSvYvN
-kTo+vCx9cZ82cYhGBBARAgAGBQJIfazWAAoJEFI0hF3yuSD16CoAnA+WJR2vjn1S
-mykS38WjR3nOx+VAAKC9k/euHMg0M/MssqoLhJdYYwQkbw==
-=0z9e
------END PGP PUBLIC KEY BLOCK-----
diff --git a/freed-ora/current/F-12/via-hwmon-temp-sensor.patch b/freed-ora/current/F-12/via-hwmon-temp-sensor.patch
new file mode 100644
index 000000000..49ced3e15
--- /dev/null
+++ b/freed-ora/current/F-12/via-hwmon-temp-sensor.patch
@@ -0,0 +1,391 @@
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 6857560..4414182 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -795,6 +795,14 @@ config SENSORS_TMP421
+ This driver can also be built as a module. If so, the module
+ will be called tmp421.
+
++config SENSORS_VIA_CPUTEMP
++ tristate "VIA CPU temperature sensor"
++ depends on X86
++ help
++ If you say yes here you get support for the temperature
++ sensor inside your CPU. Supported are all known variants
++ of the VIA C7 and Nano.
++
+ config SENSORS_VIA686A
+ tristate "VIA686A"
+ depends on PCI
+diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
+index 9f46cb0..32ed56a 100644
+--- a/drivers/hwmon/Makefile
++++ b/drivers/hwmon/Makefile
+@@ -85,6 +85,7 @@ obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
+ obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
+ obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
+ obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
++obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
+ obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
+ obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
+ obj-$(CONFIG_SENSORS_VT8231) += vt8231.o
+diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
+new file mode 100644
+index 0000000..2abe516
+--- /dev/null
++++ b/drivers/hwmon/via-cputemp.c
+@@ -0,0 +1,354 @@
++/*
++ * via-cputemp.c - Driver for VIA CPU core temperature monitoring
++ * Copyright (C) 2009 VIA Technologies, Inc.
++ *
++ * based on existing coretemp.c, which is
++ *
++ * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/jiffies.h>
++#include <linux/hwmon.h>
++#include <linux/sysfs.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/err.h>
++#include <linux/mutex.h>
++#include <linux/list.h>
++#include <linux/platform_device.h>
++#include <linux/cpu.h>
++#include <asm/msr.h>
++#include <asm/processor.h>
++
++#define DRVNAME "via_cputemp"
++
++typedef enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW;
++
++/*
++ * Functions declaration
++ */
++
++struct via_cputemp_data {
++ struct device *hwmon_dev;
++ const char *name;
++ u32 id;
++ u32 msr;
++};
++
++/*
++ * Sysfs stuff
++ */
++
++static ssize_t show_name(struct device *dev, struct device_attribute
++ *devattr, char *buf)
++{
++ int ret;
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct via_cputemp_data *data = dev_get_drvdata(dev);
++
++ if (attr->index == SHOW_NAME)
++ ret = sprintf(buf, "%s\n", data->name);
++ else /* show label */
++ ret = sprintf(buf, "Core %d\n", data->id);
++ return ret;
++}
++
++static ssize_t show_temp(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct via_cputemp_data *data = dev_get_drvdata(dev);
++ u32 eax, edx;
++ int err;
++
++ err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
++ if (err)
++ return -EAGAIN;
++
++ err = sprintf(buf, "%d\n", (eax & 0xffffff) * 1000);
++
++ return err;
++}
++
++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
++ SHOW_TEMP);
++static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
++static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
++
++static struct attribute *via_cputemp_attributes[] = {
++ &sensor_dev_attr_name.dev_attr.attr,
++ &sensor_dev_attr_temp1_label.dev_attr.attr,
++ &sensor_dev_attr_temp1_input.dev_attr.attr,
++ NULL
++};
++
++static const struct attribute_group via_cputemp_group = {
++ .attrs = via_cputemp_attributes,
++};
++
++static int __devinit via_cputemp_probe(struct platform_device *pdev)
++{
++ struct via_cputemp_data *data;
++ struct cpuinfo_x86 *c = &cpu_data(pdev->id);
++ int err;
++ u32 eax, edx;
++
++ if (!(data = kzalloc(sizeof(struct via_cputemp_data), GFP_KERNEL))) {
++ err = -ENOMEM;
++ dev_err(&pdev->dev, "Out of memory\n");
++ goto exit;
++ }
++
++ data->id = pdev->id;
++ data->name = "via-cputemp";
++
++ switch (c->x86_model) {
++ case 0xA:
++ /* C7 A */
++ case 0xD:
++ /* C7 D */
++ data->msr = 0x1169;
++ break;
++ case 0xF:
++ /* Nano */
++ data->msr = 0x1423;
++ break;
++ default:
++ err = -ENODEV;
++ goto exit_free;
++ }
++
++ /* test if we can access the TEMPERATURE MSR */
++ err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
++ if (err) {
++ dev_err(&pdev->dev,
++ "Unable to access TEMPERATURE MSR, giving up\n");
++ goto exit_free;
++ }
++
++ platform_set_drvdata(pdev, data);
++
++ if ((err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group)))
++ goto exit_free;
++
++ data->hwmon_dev = hwmon_device_register(&pdev->dev);
++ if (IS_ERR(data->hwmon_dev)) {
++ err = PTR_ERR(data->hwmon_dev);
++ dev_err(&pdev->dev, "Class registration failed (%d)\n",
++ err);
++ goto exit_class;
++ }
++
++ return 0;
++
++exit_class:
++ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
++exit_free:
++ kfree(data);
++exit:
++ return err;
++}
++
++static int __devexit via_cputemp_remove(struct platform_device *pdev)
++{
++ struct via_cputemp_data *data = platform_get_drvdata(pdev);
++
++ hwmon_device_unregister(data->hwmon_dev);
++ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
++ platform_set_drvdata(pdev, NULL);
++ kfree(data);
++ return 0;
++}
++
++static struct platform_driver via_cputemp_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRVNAME,
++ },
++ .probe = via_cputemp_probe,
++ .remove = __devexit_p(via_cputemp_remove),
++};
++
++struct pdev_entry {
++ struct list_head list;
++ struct platform_device *pdev;
++ unsigned int cpu;
++};
++
++static LIST_HEAD(pdev_list);
++static DEFINE_MUTEX(pdev_list_mutex);
++
++static int __cpuinit via_cputemp_device_add(unsigned int cpu)
++{
++ int err;
++ struct platform_device *pdev;
++ struct pdev_entry *pdev_entry;
++
++ pdev = platform_device_alloc(DRVNAME, cpu);
++ if (!pdev) {
++ err = -ENOMEM;
++ printk(KERN_ERR DRVNAME ": Device allocation failed\n");
++ goto exit;
++ }
++
++ pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
++ if (!pdev_entry) {
++ err = -ENOMEM;
++ goto exit_device_put;
++ }
++
++ err = platform_device_add(pdev);
++ if (err) {
++ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
++ err);
++ goto exit_device_free;
++ }
++
++ pdev_entry->pdev = pdev;
++ pdev_entry->cpu = cpu;
++ mutex_lock(&pdev_list_mutex);
++ list_add_tail(&pdev_entry->list, &pdev_list);
++ mutex_unlock(&pdev_list_mutex);
++
++ return 0;
++
++exit_device_free:
++ kfree(pdev_entry);
++exit_device_put:
++ platform_device_put(pdev);
++exit:
++ return err;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void via_cputemp_device_remove(unsigned int cpu)
++{
++ struct pdev_entry *p, *n;
++ mutex_lock(&pdev_list_mutex);
++ list_for_each_entry_safe(p, n, &pdev_list, list) {
++ if (p->cpu == cpu) {
++ platform_device_unregister(p->pdev);
++ list_del(&p->list);
++ kfree(p);
++ }
++ }
++ mutex_unlock(&pdev_list_mutex);
++}
++
++static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (unsigned long) hcpu;
++
++ switch (action) {
++ case CPU_ONLINE:
++ case CPU_DOWN_FAILED:
++ via_cputemp_device_add(cpu);
++ break;
++ case CPU_DOWN_PREPARE:
++ via_cputemp_device_remove(cpu);
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block via_cputemp_cpu_notifier __refdata = {
++ .notifier_call = via_cputemp_cpu_callback,
++};
++#endif /* !CONFIG_HOTPLUG_CPU */
++
++static int __init via_cputemp_init(void)
++{
++ int i, err = -ENODEV;
++ struct pdev_entry *p, *n;
++
++ if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
++ printk(KERN_DEBUG "not a VIA CPU\n");
++ goto exit;
++ }
++
++ err = platform_driver_register(&via_cputemp_driver);
++ if (err)
++ goto exit;
++
++ for_each_online_cpu(i) {
++ struct cpuinfo_x86 *c = &cpu_data(i);
++
++ if (c->x86 != 6)
++ continue;
++
++ if (c->x86_model < 0x0a)
++ continue;
++
++ if (c->x86_model > 0x0f) {
++ printk(KERN_WARNING DRVNAME ": Unknown CPU "
++ "model %x\n", c->x86_model);
++ continue;
++ }
++
++ err = via_cputemp_device_add(i);
++ if (err)
++ goto exit_devices_unreg;
++ }
++ if (list_empty(&pdev_list)) {
++ err = -ENODEV;
++ goto exit_driver_unreg;
++ }
++
++#ifdef CONFIG_HOTPLUG_CPU
++ register_hotcpu_notifier(&via_cputemp_cpu_notifier);
++#endif
++ return 0;
++
++exit_devices_unreg:
++ mutex_lock(&pdev_list_mutex);
++ list_for_each_entry_safe(p, n, &pdev_list, list) {
++ platform_device_unregister(p->pdev);
++ list_del(&p->list);
++ kfree(p);
++ }
++ mutex_unlock(&pdev_list_mutex);
++exit_driver_unreg:
++ platform_driver_unregister(&via_cputemp_driver);
++exit:
++ return err;
++}
++
++static void __exit via_cputemp_exit(void)
++{
++ struct pdev_entry *p, *n;
++#ifdef CONFIG_HOTPLUG_CPU
++ unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
++#endif
++ mutex_lock(&pdev_list_mutex);
++ list_for_each_entry_safe(p, n, &pdev_list, list) {
++ platform_device_unregister(p->pdev);
++ list_del(&p->list);
++ kfree(p);
++ }
++ mutex_unlock(&pdev_list_mutex);
++ platform_driver_unregister(&via_cputemp_driver);
++}
++
++MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
++MODULE_DESCRIPTION("VIA CPU temperature monitor");
++MODULE_LICENSE("GPL");
++
++module_init(via_cputemp_init)
++module_exit(via_cputemp_exit)
diff --git a/freed-ora/current/F-12/viafb-neuter-device-table.patch b/freed-ora/current/F-12/viafb-neuter-device-table.patch
new file mode 100644
index 000000000..359a0f5c3
--- /dev/null
+++ b/freed-ora/current/F-12/viafb-neuter-device-table.patch
@@ -0,0 +1,21 @@
+Index: linux-2.6.32.noarch/drivers/video/via/viafbdev.c
+===================================================================
+--- linux-2.6.32.noarch.orig/drivers/video/via/viafbdev.c
++++ linux-2.6.32.noarch/drivers/video/via/viafbdev.c
+@@ -2161,6 +2161,8 @@ static int __init viafb_setup(char *opti
+ #endif
+
+ static struct pci_device_id viafb_pci_table[] __devinitdata = {
++/* We don't want this driver to autoload in F11/F-12 */
++/*
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
+ .driver_data = UNICHROME_CLE266 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
+@@ -2183,6 +2185,7 @@ static struct pci_device_id viafb_pci_ta
+ .driver_data = UNICHROME_VX800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
+ .driver_data = UNICHROME_VX855 },
++*/
+ { }
+ };
+ MODULE_DEVICE_TABLE(pci, viafb_pci_table);
diff --git a/freed-ora/current/F-12/wmi-check-find_guid-return-value-to-prevent-oops.patch b/freed-ora/current/F-12/wmi-check-find_guid-return-value-to-prevent-oops.patch
new file mode 100644
index 000000000..341003c09
--- /dev/null
+++ b/freed-ora/current/F-12/wmi-check-find_guid-return-value-to-prevent-oops.patch
@@ -0,0 +1,36 @@
+From: Paul Rolland <rol@as2917.net>
+Date: Wed, 30 Dec 2009 06:19:12 +0000 (-0500)
+Subject: wmi: check find_guid() return value to prevent oops
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=b58454ec25e80fdb84e294758aeb22dd6d5ee6f9
+
+wmi: check find_guid() return value to prevent oops
+
+Signed-off-by: Paul Rolland <rol@as2917.net>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 9f93d6c..cc9ad74 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -492,8 +492,7 @@ wmi_notify_handler handler, void *data)
+ if (!guid || !handler)
+ return AE_BAD_PARAMETER;
+
+- find_guid(guid, &block);
+- if (!block)
++ if (!find_guid(guid, &block))
+ return AE_NOT_EXIST;
+
+ if (block->handler)
+@@ -521,8 +520,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
+ if (!guid)
+ return AE_BAD_PARAMETER;
+
+- find_guid(guid, &block);
+- if (!block)
++ if (!find_guid(guid, &block))
+ return AE_NOT_EXIST;
+
+ if (!block->handler)
diff --git a/freed-ora/current/F-12/wmi-survive-bios-with-duplicate-guids.patch b/freed-ora/current/F-12/wmi-survive-bios-with-duplicate-guids.patch
new file mode 100644
index 000000000..15c7be394
--- /dev/null
+++ b/freed-ora/current/F-12/wmi-survive-bios-with-duplicate-guids.patch
@@ -0,0 +1,76 @@
+From: Carlos Corbacho <carlos@strangeworlds.co.uk>
+Date: Sat, 26 Dec 2009 19:14:59 +0000 (+0000)
+Subject: ACPI: WMI: Survive BIOS with duplicate GUIDs
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=d1f9e4970742bb1e22d07b01bd44f9c357d25c42
+
+ACPI: WMI: Survive BIOS with duplicate GUIDs
+
+It would appear that in BIOS's with nVidia hooks, the GUID
+05901221-D566-11D1-B2F0-00A0C9062910 is duplicated. For now, the simplest
+solution is to just ignore any duplicate GUIDs. These particular hooks are not
+currently supported/ used in the kernel, so whoever does that can figure out
+what the 'right' solution should be (if there's a better one).
+
+http://bugzilla.kernel.org/show_bug.cgi?id=14846
+
+Signed-off-by: Carlos Corbacho <carlos@strangeworlds.co.uk>
+Reported-by: Larry Finger <Larry.Finger@lwfinger.net>
+Reported-by: Oldřich Jedlička <oldium.pro@seznam.cz>
+Signed-off-by: Len Brown <len.brown@intel.com>
+---
+
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index cc9ad74..b104302 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -714,6 +714,22 @@ static int wmi_class_init(void)
+ return ret;
+ }
+
++static bool guid_already_parsed(const char *guid_string)
++{
++ struct guid_block *gblock;
++ struct wmi_block *wblock;
++ struct list_head *p;
++
++ list_for_each(p, &wmi_blocks.list) {
++ wblock = list_entry(p, struct wmi_block, list);
++ gblock = &wblock->gblock;
++
++ if (strncmp(gblock->guid, guid_string, 16) == 0)
++ return true;
++ }
++ return false;
++}
++
+ /*
+ * Parse the _WDG method for the GUID data blocks
+ */
+@@ -723,6 +739,7 @@ static __init acpi_status parse_wdg(acpi_handle handle)
+ union acpi_object *obj;
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
++ char guid_string[37];
+ acpi_status status;
+ u32 i, total;
+
+@@ -745,6 +762,19 @@ static __init acpi_status parse_wdg(acpi_handle handle)
+ memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
+
+ for (i = 0; i < total; i++) {
++ /*
++ Some WMI devices, like those for nVidia hooks, have a
++ duplicate GUID. It's not clear what we should do in this
++ case yet, so for now, we'll just ignore the duplicate.
++ Anyone who wants to add support for that device can come
++ up with a better workaround for the mess then.
++ */
++ if (guid_already_parsed(gblock[i].guid) == true) {
++ wmi_gtoa(gblock[i].guid, guid_string);
++ printk(KERN_INFO PREFIX "Skipping duplicate GUID %s\n",
++ guid_string);
++ continue;
++ }
+ wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+ if (!wblock)
+ return AE_NO_MEMORY;