Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 94
1 file changed, 51 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 4918c575d582..06bcba536045 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -8,6 +8,7 @@
* See Documentation/DMA-mapping.txt for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
+ * Subject to the GNU General Public License v2 only.
*/
#include <linux/types.h>
@@ -23,22 +24,23 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
+#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
-#include <asm/iommu.h>
+#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>
-unsigned long iommu_bus_base; /* GART remapping area (physical) */
+static unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */
static unsigned long iommu_pages; /* .. and in pages */
-u32 *iommu_gatt_base; /* Remapping table */
+static u32 *iommu_gatt_base; /* Remapping table */
/* If this is disabled the IOMMU will use an optimized flushing strategy
of only flushing when a mapping is reused. With it true the GART is flushed
@@ -133,8 +135,8 @@ static void flush_gart(void)
/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
-int iommu_leak_pages = 20;
-void dump_leak(void)
+static int iommu_leak_pages = 20;
+static void dump_leak(void)
{
int i;
static int dump;
@@ -278,10 +280,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
*/
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
+ struct scatterlist *s;
int i;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
+ for_each_sg(sg, s, nents, i) {
if (!s->dma_length || !s->length)
break;
gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
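
The conversion above is the core of the patch: with chained scatterlists the entries are no longer one flat array, so indexing via &sg[i] can run off the end of a chunk. A minimal sketch of the for_each_sg() idiom (count_sg_bytes() is a hypothetical helper, not part of the patch):

	#include <linux/scatterlist.h>

	/* Hypothetical helper, for illustration only. */
	static unsigned int count_sg_bytes(struct scatterlist *sglist, int nents)
	{
		struct scatterlist *s;
		unsigned int total = 0;
		int i;

		/* for_each_sg() advances with sg_next(), which follows
		 * chain links that plain &sglist[i] indexing would miss. */
		for_each_sg(sglist, s, nents, i)
			total += s->length;

		return total;
	}
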
@@ -292,15 +294,15 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
int nents, int dir)
{
+ struct scatterlist *s;
int i;
#ifdef CONFIG_IOMMU_DEBUG
printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
- unsigned long addr = page_to_phys(s->page) + s->offset;
+ for_each_sg(sg, s, nents, i) {
+ unsigned long addr = sg_phys(s);
if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir);
if (addr == bad_dma_address) {
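
sg_phys(), used above in place of the open-coded sum, goes through the sg_page() accessor instead of touching s->page directly, which is what lets the scatterlist core change the entry layout for chaining. A sketch of the equivalence (sg_bus_addr() is a hypothetical name; sg_phys() itself is the real helper):

	#include <linux/scatterlist.h>
	#include <asm/io.h>

	/* Hypothetical helper showing what sg_phys(s) computes. */
	static unsigned long sg_bus_addr(struct scatterlist *s)
	{
		/* old open-coded form: page_to_phys(s->page) + s->offset */
		return page_to_phys(sg_page(s)) + s->offset;
	}
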
@@ -319,24 +321,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
}
/* Map multiple scatterlist entries contiguously into the first. */
-static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static int __dma_map_cont(struct scatterlist *start, int nelems,
struct scatterlist *sout, unsigned long pages)
{
unsigned long iommu_start = alloc_iommu(pages);
unsigned long iommu_page = iommu_start;
+ struct scatterlist *s;
int i;
if (iommu_start == -1)
return -1;
-
- for (i = start; i < stopat; i++) {
- struct scatterlist *s = &sg[i];
+
+ for_each_sg(start, s, nelems, i) {
unsigned long pages, addr;
unsigned long phys_addr = s->dma_address;
- BUG_ON(i > start && s->offset);
- if (i == start) {
- *sout = *s;
+ BUG_ON(s != start && s->offset);
+ if (s == start) {
sout->dma_address = iommu_bus_base;
sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
sout->dma_length = s->length;
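
The two sout->dma_address lines above compose the bus address a device sees for a merged run: GART window base, plus the allocated GART page, plus the first entry's in-page offset. A worked example with assumed numbers (none of them come from the patch):

	/* Assume iommu_bus_base = 0xe0000000, alloc_iommu() returned
	 * GART page 5, PAGE_SIZE = 4096, and the first entry starts
	 * at offset 0x200 within its page: */
	dma_addr_t bus = 0xe0000000UL + 5 * 4096 + 0x200;
	/* bus == 0xe0005200 is what lands in sout->dma_address */
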
@@ -357,30 +358,32 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
return 0;
}
-static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
+static inline int dma_map_cont(struct scatterlist *start, int nelems,
struct scatterlist *sout,
unsigned long pages, int need)
{
- if (!need) {
- BUG_ON(stopat - start != 1);
- *sout = sg[start];
- sout->dma_length = sg[start].length;
+ if (!need) {
+ BUG_ON(nelems != 1);
+ sout->dma_address = start->dma_address;
+ sout->dma_length = start->length;
return 0;
- }
- return __dma_map_cont(sg, start, stopat, sout, pages);
+ }
+ return __dma_map_cont(start, nelems, sout, pages);
}
/*
* DMA map all entries in a scatterlist.
* Merge chunks that have page aligned sizes into a continuous mapping.
*/
-int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ int dir)
{
int i;
int out;
int start;
unsigned long pages = 0;
int need = 0, nextneed;
+ struct scatterlist *s, *ps, *start_sg, *sgmap;
if (nents == 0)
return 0;
@@ -390,9 +393,10 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
out = 0;
start = 0;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
- dma_addr_t addr = page_to_phys(s->page) + s->offset;
+ start_sg = sgmap = sg;
+ ps = NULL; /* shut up gcc */
+ for_each_sg(sg, s, nents, i) {
+ dma_addr_t addr = sg_phys(s);
s->dma_address = addr;
BUG_ON(s->length == 0);
@@ -400,34 +404,38 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
/* Handle the previous not yet processed entries */
if (i > start) {
- struct scatterlist *ps = &sg[i-1];
/* Can only merge when the last chunk ends on a page
boundary and the new one doesn't have an offset. */
if (!iommu_merge || !nextneed || !need || s->offset ||
- (ps->offset + ps->length) % PAGE_SIZE) {
- if (dma_map_cont(sg, start, i, sg+out, pages,
- need) < 0)
+ (ps->offset + ps->length) % PAGE_SIZE) {
+ if (dma_map_cont(start_sg, i - start, sgmap,
+ pages, need) < 0)
goto error;
out++;
+ sgmap = sg_next(sgmap);
pages = 0;
- start = i;
+ start = i;
+ start_sg = s;
}
}
need = nextneed;
pages += to_pages(s->offset, s->length);
+ ps = s;
}
- if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
+ if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
goto error;
out++;
flush_gart();
- if (out < nents)
- sg[out].dma_length = 0;
+ if (out < nents) {
+ sgmap = sg_next(sgmap);
+ sgmap->dma_length = 0;
+ }
return out;
error:
flush_gart();
- gart_unmap_sg(dev, sg, nents, dir);
+ gart_unmap_sg(dev, sg, out, dir);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
out = dma_map_sg_nonforce(dev, sg, nents, dir);
@@ -437,8 +445,8 @@ error:
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
iommu_full(dev, pages << PAGE_SHIFT, dir);
- for (i = 0; i < nents; i++)
- sg[i].dma_address = bad_dma_address;
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = bad_dma_address;
return 0;
}
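
The in-hunk comment above states the merge rule in prose; restated as a hypothetical predicate (can_merge() does not exist in the file, and the real test additionally consults iommu_merge and the per-entry need/nextneed flags):

	#include <linux/types.h>
	#include <linux/scatterlist.h>

	/* Hypothetical predicate: two entries may share one GART mapping
	 * only if the previous one ends exactly on a page boundary and
	 * the next one starts at offset zero, so the pair stays
	 * contiguous once remapped. */
	static bool can_merge(struct scatterlist *prev, struct scatterlist *next)
	{
		return ((prev->offset + prev->length) % PAGE_SIZE) == 0 &&
		       next->offset == 0;
	}
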
@@ -619,12 +627,12 @@ void __init gart_iommu_init(void)
return;
/* Did we detect a different HW IOMMU? */
- if (iommu_detected && !iommu_aperture)
+ if (iommu_detected && !gart_iommu_aperture)
return;
if (no_iommu ||
(!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
- !iommu_aperture ||
+ !gart_iommu_aperture ||
(no_agp && init_k8_gatt(&info) < 0)) {
if (end_pfn > MAX_DMA32_PFN) {
printk(KERN_ERR "WARNING more than 4GB of memory "
@@ -725,9 +733,9 @@ void __init gart_parse_options(char *p)
fix_aperture = 0;
/* duplicated from pci-dma.c */
if (!strncmp(p,"force",5))
- iommu_aperture_allowed = 1;
+ gart_iommu_aperture_allowed = 1;
if (!strncmp(p,"allowed",7))
- iommu_aperture_allowed = 1;
+ gart_iommu_aperture_allowed = 1;
if (!strncmp(p, "memaper", 7)) {
fallback_aper_force = 1;
p += 7;