author | Jake Moilanen <moilanen@austin.ibm.com> | 2007-03-29 08:44:02 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2007-04-13 03:55:13 +1000 |
commit | 569975591c5530fdc9c7a3c45122e5e46f075a74 (patch) | |
tree | 47908de09d2e7d55d82a3b9d2e47b34b2c7f8133 /arch/powerpc/kernel/iommu.c | |
parent | 1f9209cfe06be715b82075e79c9aab3c5b714010 (diff) | |
[POWERPC] DMA 4GB boundary protection
Many adapters cannot handle a DMA that crosses a 4 GB boundary; the latest Emulex adapters are one example.
This is normally not an issue, since firmware places the DMA windows below 4 GB. However, some of the new System-P boxes have DMA windows above 4 GB, and this presents a problem.
During initialization of the IOMMU tables, the last entry at each 4 GB boundary is marked as used, so no mapping can cross the boundary.
If a table ends on a 4 GB boundary, that entry is not marked as used.
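For illustration only, here is a minimal userspace sketch of the index arithmetic described above; the page shift, table offset and table size are made-up example values (not from any real machine), and a 64-bit unsigned long is assumed, as on ppc64:

#include <stdio.h>

int main(void)
{
	/* Hypothetical example values, not taken from any particular system */
	unsigned long page_shift = 12;       /* 4 KB IOMMU pages */
	unsigned long it_offset  = 0x380000; /* first TCE index covered by the table */
	unsigned long it_size    = 0x400000; /* number of TCEs in the table */

	unsigned long entries_per_4g = 0x100000000ul >> page_shift;

	/* Table-relative index of the last entry before the first 4 GB boundary */
	unsigned long start_index = (it_offset | (entries_per_4g - 1)) - it_offset;
	unsigned long end_index = it_size;
	unsigned long index;

	/* One entry reserved per 4 GB chunk; the table's very last entry is skipped */
	for (index = start_index; index < end_index - 1; index += entries_per_4g)
		printf("reserve TCE index %#lx\n", index);

	return 0;
}

With these example numbers the reserved entries land at indices 0x7ffff, 0x17ffff, 0x27ffff and 0x37ffff, one per 4 GB chunk of the DMA window.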
A boot option, protect4gb=off, is provided to disable this 4 GB protection.
This exposes the potential issue for driver and hardware development
purposes.
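For example, with a yaboot-style bootloader the option would simply be appended to the kernel arguments (the surrounding configuration below is a hypothetical sketch; only protect4gb=off comes from this patch):

	append="root=/dev/sda2 console=hvc0 protect4gb=off"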
Signed-off-by: Jake Moilanen <moilanen@austin.ibm.com>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r-- | arch/powerpc/kernel/iommu.c | 35 |
1 files changed, 34 insertions, 1 deletions
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c50d7072f305..d2598e2e7bbe 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -47,6 +47,8 @@ static int novmerge = 0;
 static int novmerge = 1;
 #endif
 
+static int protect4gb = 1;
+
 static inline unsigned long iommu_num_pages(unsigned long vaddr,
 					    unsigned long slen)
 {
@@ -58,6 +60,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
 	return npages;
 }
 
+static int __init setup_protect4gb(char *str)
+{
+	if (strcmp(str, "on") == 0)
+		protect4gb = 1;
+	else if (strcmp(str, "off") == 0)
+		protect4gb = 0;
+
+	return 1;
+}
+
 static int __init setup_iommu(char *str)
 {
 	if (!strcmp(str, "novmerge"))
@@ -67,6 +79,7 @@ static int __init setup_iommu(char *str)
 	return 1;
 }
 
+__setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
@@ -439,6 +452,9 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
 	unsigned long sz;
+	unsigned long start_index, end_index;
+	unsigned long entries_per_4g;
+	unsigned long index;
 	static int welcomed = 0;
 	struct page *page;
 
@@ -460,7 +476,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 
 #ifdef CONFIG_CRASH_DUMP
 	if (ppc_md.tce_get) {
-		unsigned long index, tceval;
+		unsigned long tceval;
 		unsigned long tcecount = 0;
 
 		/*
@@ -490,6 +506,23 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 #endif
 
+	/*
+	 * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
+	 * GB chunk as reserved.
+	 */
+	if (protect4gb) {
+		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
+
+		/* Mark the last bit before a 4GB boundary as used */
+		start_index = tbl->it_offset | (entries_per_4g - 1);
+		start_index -= tbl->it_offset;
+
+		end_index = tbl->it_size;
+
+		for (index = start_index; index < end_index - 1; index += entries_per_4g)
+			__set_bit(index, tbl->it_map);
+	}
+
 	if (!welcomed) {
 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 		       novmerge ? "disabled" : "enabled");