author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-10-17 17:14:26 -0400
committer Dave Airlie <airlied@redhat.com>  2011-12-06 10:39:51 +0000
commit    3230cfc34fca9d17c1628cf0e4ac25199592a69a
tree      40685914703f0a709b2180d7cdf01e770fa5a4dc /drivers/gpu
parent    c52494f69538f6fe1a234972f024011b17a48329
drm/nouveau: enable the ttm dma pool when swiotlb is active V3
If the card is capable of more than 32-bit, then use the default TTM page
pool code which allocates from anywhere in the memory.

Note: If the 'ttm.no_dma' parameter is set, the override is ignored and
the default TTM pool is used.

V2: use pci_set_consistent_dma_mask
V3: rebase on top of the no-memory-accounting changes (where/when is my
DeLorean when I need it?)

CC: Ben Skeggs <bskeggs@redhat.com>
CC: Francisco Jerez <currojerez@riseup.net>
CC: Dave Airlie <airlied@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
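The gist of the new populate path, condensed (a sketch of the policy only;
the real code is in the nouveau_bo.c hunk below): when swiotlb is active,
streaming mappings of pool pages may be bounced through the software IOMMU
on every transfer, so population is routed through TTM's coherent DMA pool
instead.

	/* Condensed from the hunk below; 'dev' is the drm_device, as there. */
	if (swiotlb_nr_tbl())				/* swiotlb active?   */
		return ttm_dma_populate(ttm, dev->dev);	/* coherent DMA pool */
	r = ttm_pool_populate(ttm);		/* default page pool, then   */
						/* pci_map_page() each page  */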
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c       73
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c       6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c    60
4 files changed, 79 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f19ac42578bb..2dc0d8303cb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1049,10 +1049,79 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
nouveau_fence_unref(&old_fence);
}
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate(ttm, dev->dev);
+ }
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+ while (--i) {
+ pci_unmap_page(dev->pdev, ttm->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ttm->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate(ttm, dev->dev);
+ return;
+ }
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (ttm->dma_address[i]) {
+ pci_unmap_page(dev->pdev, ttm->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
- .ttm_tt_populate = &ttm_pool_populate,
- .ttm_tt_unpopulate = &ttm_pool_unpopulate,
+ .ttm_tt_populate = &nouveau_ttm_tt_populate,
+ .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
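A side note on the unwind loop introduced above: because i is unsigned,
while (--i) stops before index 0, so the first mapped page is never
unmapped, and if the very first mapping fails the decrement wraps around
and the loop walks off the array. A fuller unwind, as a hedged sketch of
a possible fix rather than anything in this series:

	/* Hypothetical unwind, not part of this commit: release exactly
	 * the pages 0..i-1 that were mapped before the failure. */
	while (i--) {
		pci_unmap_page(dev->pdev, ttm->dma_address[i],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		ttm->dma_address[i] = 0;
	}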
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8e1592368cce..f52c2db3529e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+ { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
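With this extra entry in place the DMA pool becomes observable at runtime:
assuming the usual DRM debugfs layout, reading
/sys/kernel/debug/dri/<minor>/ttm_dma_page_pool (the exact minor number
varies per card) dumps the per-device pool counters, just as the existing
ttm_page_pool entry does for the plain allocator.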
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 36bec4807701..37fcaa260e98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
return ret;
+ ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+ if (ret) {
+ /* Reset to default value. */
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+ }
+
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
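The DMA API tracks the streaming and coherent masks separately, which is
why the hunk above can keep pci_set_dma_mask() at dma_bits while quietly
dropping only the coherent mask back to 32 bits when the platform rejects
the wider one. The same policy expressed with the generic DMA API that
later replaced the pci_* wrappers, as a hedged sketch:

	/* Assumption: same fallback policy as the hunk above; streaming
	 * mask stays at dma_bits even if the coherent mask falls back. */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_bits)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));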
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index bc2ab900b24c..ee1eb7cba798 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -13,41 +13,6 @@ struct nouveau_sgdma_be {
u64 offset;
};
-static int
-nouveau_sgdma_dma_map(struct ttm_tt *ttm)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct drm_device *dev = nvbe->dev;
- int i;
-
- for (i = 0; i < ttm->num_pages; i++) {
- ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
-static void
-nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct drm_device *dev = nvbe->dev;
- int i;
-
- for (i = 0; i < ttm->num_pages; i++) {
- if (ttm->dma_address[i]) {
- pci_unmap_page(dev->pdev, ttm->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- ttm->dma_address[i] = 0;
- }
-}
-
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
@@ -67,13 +32,8 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned i, j, pte;
- int r;
NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
- r = nouveau_sgdma_dma_map(ttm);
- if (r) {
- return r;
- }
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
@@ -110,7 +70,6 @@ nv04_sgdma_unbind(struct ttm_tt *ttm)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
- nouveau_sgdma_dma_unmap(ttm);
return 0;
}
@@ -141,13 +100,8 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
dma_addr_t *list = ttm->dma_address;
u32 pte = mem->start << 2;
u32 cnt = ttm->num_pages;
- int r;
nvbe->offset = mem->start << PAGE_SHIFT;
- r = nouveau_sgdma_dma_map(ttm);
- if (r) {
- return r;
- }
while (cnt--) {
nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -173,7 +127,6 @@ nv41_sgdma_unbind(struct ttm_tt *ttm)
}
nv41_sgdma_flush(nvbe);
- nouveau_sgdma_dma_unmap(ttm);
return 0;
}
@@ -256,13 +209,9 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
dma_addr_t *list = ttm->dma_address;
u32 pte = mem->start << 2, tmp[4];
u32 cnt = ttm->num_pages;
- int i, r;
+ int i;
nvbe->offset = mem->start << PAGE_SHIFT;
- r = nouveau_sgdma_dma_map(ttm);
- if (r) {
- return r;
- }
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -321,7 +270,6 @@ nv44_sgdma_unbind(struct ttm_tt *ttm)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
nv44_sgdma_flush(ttm);
- nouveau_sgdma_dma_unmap(ttm);
return 0;
}
@@ -335,13 +283,8 @@ static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_mem *node = mem->mm_node;
- int r;
/* noop: bound in move_notify() */
- r = nouveau_sgdma_dma_map(ttm);
- if (r) {
- return r;
- }
node->pages = ttm->dma_address;
return 0;
}
@@ -350,7 +293,6 @@ static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
/* noop: unbound in move_notify() */
- nouveau_sgdma_dma_unmap(ttm);
return 0;
}