author     David VomLehn <dvomlehn@cisco.com>    2010-05-21 11:25:36 -0700
committer  Ralf Baechle <ralf@linux-mips.org>    2010-08-05 13:25:40 +0100
commit     ca36c36b7821b573fe06ce6bc34db03b557f3ce4 (patch)
tree       28fe24b5080e29ca62ff4d1b4bbf5afc83084792 /arch/mips/include/asm/mach-powertv
parent     36f217d9df3e6bf8e6ae7647827b485b79dbaf8e (diff)
MIPS: PowerTV: Use O(1) algorithm for phys_to_dma/dma_to_phys
Replace the phys_to_dma()/dma_to_phys() looping algorithm with an O(1) algorithm. The approach taken is inspired by the sparse memory implementation: take a certain number of high-order bits off the address, then use this as an index into a table containing an offset to the desired address, and add it to the original value. There is a table for mapping physical addresses to DMA addresses and another one for the reverse mapping. The table sizes depend on how fine-grained the mappings need to be; coarser granularity leads to smaller tables. On a processor with 32-bit physical and DMA addresses, with 4 MiB granularity, memory usage is two 2048-byte arrays. Each 32-byte cache line thus covers 64 MiB of address space.

Also, rename phys_to_bus() to phys_to_dma() and bus_to_phys() to dma_to_phys() to align with kernel usage.

[Ralf: Fixed silly build breakage due to stack overflow warning caused by huge array on stack.]

Signed-off-by: David VomLehn <dvomlehn@cisco.com>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/1257/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
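As a rough illustration of the scheme the log describes (not the kernel implementation; the names, the 32-bit address width, the 4 MiB grain and the hand-filled table entry below are assumptions mirroring the figures above), the conversion boils down to an index, a table read and an add:

/*
 * Standalone sketch of the O(1) table-lookup conversion described above.
 * Everything here is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define GRAIN_BITS   22                          /* 4 MiB per grain */
#define NUM_ENTRIES  (1u << (32 - GRAIN_BITS))   /* 1024 entries */
#define OFFSET_SHIFT 16                          /* offset kept in the top 16 bits */

/* 16-bit offsets: 1024 entries * 2 bytes = one 2048-byte table */
static uint16_t phys_to_dma_tbl[NUM_ENTRIES];
_Static_assert(sizeof(phys_to_dma_tbl) == 2048, "matches the 2048-byte figure above");

static uint32_t example_phys_to_dma(uint32_t phys)
{
	/* High-order address bits index the table; the stored offset is
	 * shifted back into the high bits and added to the address. */
	uint32_t offset = phys_to_dma_tbl[phys >> GRAIN_BITS];

	return phys + (offset << OFFSET_SHIFT);
}

int main(void)
{
	/* Hypothetical mapping: the 4 MiB grain at physical 0x10000000 is
	 * offset by +0x20000000 on the DMA (bus) side. */
	phys_to_dma_tbl[0x10000000u >> GRAIN_BITS] = 0x20000000u >> OFFSET_SHIFT;

	printf("0x%08x\n", (unsigned)example_phys_to_dma(0x10001234u)); /* 0x30001234 */
	return 0;
}

Sixteen of these 2-byte entries fit in a 32-byte cache line, so one line covers 16 x 4 MiB = 64 MiB of address space, matching the log message.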
Diffstat (limited to 'arch/mips/include/asm/mach-powertv')
-rw-r--r--  arch/mips/include/asm/mach-powertv/dma-coherence.h |   8
-rw-r--r--  arch/mips/include/asm/mach-powertv/ioremap.h       | 165
2 files changed, 125 insertions, 48 deletions
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index 5b8d5ebeb838..f76029c2406e 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -65,21 +65,21 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
if (is_kseg2(addr))
- return phys_to_bus(virt_to_phys_from_pte(addr));
+ return phys_to_dma(virt_to_phys_from_pte(addr));
else
- return phys_to_bus(virt_to_phys(addr));
+ return phys_to_dma(virt_to_phys(addr));
}
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
struct page *page)
{
- return phys_to_bus(page_to_phys(page));
+ return phys_to_dma(page_to_phys(page));
}
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
- return bus_to_phys(dma_addr);
+ return dma_to_phys(dma_addr);
}
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
index e6276d5146e8..076f2eeaa575 100644
--- a/arch/mips/include/asm/mach-powertv/ioremap.h
+++ b/arch/mips/include/asm/mach-powertv/ioremap.h
@@ -10,64 +10,101 @@
#define __ASM_MACH_POWERTV_IOREMAP_H
#include <linux/types.h>
+#include <linux/log2.h>
+#include <linux/compiler.h>
-#define LOW_MEM_BOUNDARY_PHYS 0x20000000
-#define LOW_MEM_BOUNDARY_MASK (~(LOW_MEM_BOUNDARY_PHYS - 1))
+#include <asm/pgtable-bits.h>
+#include <asm/addrspace.h>
+
+/* We're going to mess with bits, so get sizes */
+#define IOR_BPC 8 /* Bits per char */
+#define IOR_PHYS_BITS (IOR_BPC * sizeof(phys_addr_t))
+#define IOR_DMA_BITS (IOR_BPC * sizeof(dma_addr_t))
/*
- * The bus addresses are different than the physical addresses that
- * the processor sees by an offset. This offset varies by ASIC
- * version. Define a variable to hold the offset and some macros to
- * make the conversion simpler. */
-extern unsigned long phys_to_bus_offset;
-
-#ifdef CONFIG_HIGHMEM
-#define MEM_GAP_PHYS 0x60000000
+ * Define the granularity of physical/DMA mapping in terms of the number
+ * of bits that defines the offset within a grain. These will be the
+ * least significant bits of the address. The rest of a physical or DMA
+ * address will be used to index into an appropriate table to find the
+ * offset to add to the address to yield the corresponding DMA or physical
+ * address, respectively.
+ */
+#define IOR_LSBITS 22 /* Bits in a grain */
+
/*
- * TODO: We will use the hard code for conversion between physical and
- * bus until the bootloader releases their device tree to us.
+ * Compute the number of most significant address bits after removing those
+ * used for the offset within a grain and then compute the number of table
+ * entries for the conversion.
*/
-#define phys_to_bus(x) (((x) < LOW_MEM_BOUNDARY_PHYS) ? \
- ((x) + phys_to_bus_offset) : (x))
-#define bus_to_phys(x) (((x) < MEM_GAP_PHYS_ADDR) ? \
- ((x) - phys_to_bus_offset) : (x))
-#else
-#define phys_to_bus(x) ((x) + phys_to_bus_offset)
-#define bus_to_phys(x) ((x) - phys_to_bus_offset)
-#endif
+#define IOR_PHYS_MSBITS (IOR_PHYS_BITS - IOR_LSBITS)
+#define IOR_NUM_PHYS_TO_DMA ((phys_addr_t) 1 << IOR_PHYS_MSBITS)
+
+#define IOR_DMA_MSBITS (IOR_DMA_BITS - IOR_LSBITS)
+#define IOR_NUM_DMA_TO_PHYS ((dma_addr_t) 1 << IOR_DMA_MSBITS)
/*
- * Determine whether the address we are given is for an ASIC device
- * Params: addr Address to check
- * Returns: Zero if the address is not for ASIC devices, non-zero
- * if it is.
+ * Define data structures used as elements in the arrays for the conversion
+ * between physical and DMA addresses. We do some slightly fancy math to
+ * compute the width of the offset element of the conversion tables so
+ * that we can have the smallest conversion tables. Next, round up the
+ * sizes to the next higher power of two, i.e. the offset element will have
+ * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any
+ * bits. Finally, we compute a shift value that puts the most significant
+ * bits of the offset into the most significant bits of the offset element.
+ * This makes it more efficient on processors without barrel shifters and
+ * easier to see the values if the conversion table is dumped in binary.
*/
-static inline int asic_is_device_addr(phys_t addr)
+#define _IOR_OFFSET_WIDTH(n) (1 << order_base_2(n))
+#define IOR_OFFSET_WIDTH(n) \
+ (_IOR_OFFSET_WIDTH(n) < 8 ? 8 : _IOR_OFFSET_WIDTH(n))
+
+#define IOR_PHYS_OFFSET_BITS IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS)
+#define IOR_PHYS_SHIFT (IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS)
+
+#define IOR_DMA_OFFSET_BITS IOR_OFFSET_WIDTH(IOR_DMA_MSBITS)
+#define IOR_DMA_SHIFT (IOR_DMA_BITS - IOR_DMA_OFFSET_BITS)
+
+struct ior_phys_to_dma {
+ dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed
+ __aligned((IOR_DMA_OFFSET_BITS / IOR_BPC));
+};
+
+struct ior_dma_to_phys {
+ dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed
+ __aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC));
+};
+
+extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
+extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
+
+static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys)
{
- return !((phys_t)addr & (phys_t) LOW_MEM_BOUNDARY_MASK);
+ return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset;
}
-/*
- * Determine whether the address we are given is external RAM mappable
- * into KSEG1.
- * Params: addr Address to check
- * Returns: Zero if the address is not for external RAM and
- */
-static inline int asic_is_lowmem_ram_addr(phys_t addr)
+static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma)
{
- /*
- * The RAM always starts at the following address in the processor's
- * physical address space
- */
- static const phys_t phys_ram_base = 0x10000000;
- phys_t bus_ram_base;
+ return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset;
+}
- bus_ram_base = phys_to_bus_offset + phys_ram_base;
+/* These are not portable and should not be used in drivers. Drivers should
* be using ioremap() and friends to map physical addresses to virtual
+ * addresses and dma_map*() and friends to map virtual addresses into DMA
+ * addresses and back.
+ */
+static inline dma_addr_t phys_to_dma(phys_addr_t phys)
+{
+ return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT);
+}
- return addr >= bus_ram_base &&
- addr < (bus_ram_base + (LOW_MEM_BOUNDARY_PHYS - phys_ram_base));
+static inline phys_addr_t dma_to_phys(dma_addr_t dma)
+{
+ return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT);
}
+extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias,
+ dma_addr_t size);
+
/*
* Allow physical addresses to be fixed up to help peripherals located
* outside the low 32-bit range -- generic pass-through version.
@@ -77,10 +114,50 @@ static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
return phys_addr;
}
-static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
+/*
+ * Handle the special case of addresses in the area aliased into the first
+ * 512 MiB of the processor's physical address space. These turn into either
+ * kseg0 or kseg1 addresses, depending on flags.
+ */
+static inline void __iomem *plat_ioremap(phys_t start, unsigned long size,
unsigned long flags)
{
- return NULL;
+ phys_addr_t start_offset;
+ void __iomem *result = NULL;
+
+ /* Start by checking to see whether this is an aliased address */
+ start_offset = _dma_to_phys_offset_raw(start);
+
+ /*
+ * If:
+ * o the memory is aliased into the first 512 MiB, and
+ * o the start and end are in the same RAM bank, and
+ * o we don't have a zero size or wrap around, and
+ * o we are supposed to create an uncached mapping,
+ * handle this as a kseg0 or kseg1 address
+ */
+ if (start_offset != 0) {
+ phys_addr_t last;
+ dma_addr_t dma_to_phys_offset;
+
+ last = start + size - 1;
+ dma_to_phys_offset =
+ _dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT;
+
+ if (dma_to_phys_offset == start_offset &&
+ size != 0 && start <= last) {
+ phys_t adjusted_start;
+ adjusted_start = start + start_offset;
+ if (flags == _CACHE_UNCACHED)
+ result = (void __iomem *) (unsigned long)
+ CKSEG1ADDR(adjusted_start);
+ else
+ result = (void __iomem *) (unsigned long)
+ CKSEG0ADDR(adjusted_start);
+ }
+ }
+
+ return result;
}
static inline int plat_iounmap(const volatile void __iomem *addr)
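The ioremap.h hunk above only declares the two conversion tables and the ioremap_add_map() hook that fills them; the fill routine itself lives in the PowerTV platform code and is not part of this diff. Purely for illustration, under the same simplified 32-bit/4 MiB assumptions as the earlier sketch (all names here are local to the sketch, not the actual platform code), a table fill and round trip could look like this:

/*
 * Hypothetical fill routine in the spirit of ioremap_add_map(): record
 * that the physical region [phys, phys + size) appears on the DMA (bus)
 * side at [dma, dma + size).  Sketch only; the real platform code differs
 * in types, rounding and error handling.
 */
#include <stdint.h>
#include <stdio.h>

#define GRAIN_BITS   22                          /* 4 MiB per grain */
#define GRAIN_SIZE   (1u << GRAIN_BITS)
#define NUM_ENTRIES  (1u << (32 - GRAIN_BITS))
#define OFFSET_SHIFT 16                          /* offsets stored in top 16 bits */

static uint16_t phys_to_dma_tbl[NUM_ENTRIES];
static uint16_t dma_to_phys_tbl[NUM_ENTRIES];

static void example_add_map(uint32_t phys, uint32_t dma, uint32_t size)
{
	uint32_t n;

	/* One entry per grain, in both directions.  The offsets wrap in
	 * 32-bit unsigned arithmetic, so the same code works whichever
	 * side of the alias is higher. */
	for (n = 0; n < size; n += GRAIN_SIZE) {
		phys_to_dma_tbl[(phys + n) >> GRAIN_BITS] =
			(uint16_t)((dma - phys) >> OFFSET_SHIFT);
		dma_to_phys_tbl[(dma + n) >> GRAIN_BITS] =
			(uint16_t)((phys - dma) >> OFFSET_SHIFT);
	}
}

static uint32_t example_phys_to_dma(uint32_t phys)
{
	return phys + ((uint32_t)phys_to_dma_tbl[phys >> GRAIN_BITS] << OFFSET_SHIFT);
}

static uint32_t example_dma_to_phys(uint32_t dma)
{
	return dma + ((uint32_t)dma_to_phys_tbl[dma >> GRAIN_BITS] << OFFSET_SHIFT);
}

int main(void)
{
	/* Hypothetical region: 32 MiB at physical 0x10000000, seen by the
	 * bus at 0x30000000. */
	example_add_map(0x10000000u, 0x30000000u, 32u << 20);

	printf("dma  0x%08x\n", (unsigned)example_phys_to_dma(0x10001234u)); /* 0x30001234 */
	printf("phys 0x%08x\n", (unsigned)example_dma_to_phys(0x30001234u)); /* 0x10001234 */
	return 0;
}

Because only the top 16 bits of each offset are kept, this sketch assumes the alias offset is a multiple of 64 KiB, which holds trivially when regions are aligned to the 4 MiB grain.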