drm/radeon: Move looping over the PTEs into chip code

Makes it easier to move it into the rings.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
This commit is contained in:
Christian König 2012-08-11 11:54:05 +02:00 committed by Alex Deucher
parent ddf03f5cdd
commit 089a786e2c
5 changed files with 34 additions and 34 deletions

View file

@@ -1503,9 +1503,7 @@ void cayman_vm_fini(struct radeon_device *rdev)
 #define R600_PTE_READABLE	(1 << 5)
 #define R600_PTE_WRITEABLE	(1 << 6)
 
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
-			      struct radeon_vm *vm,
-			      uint32_t flags)
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
 {
 	uint32_t r600_flags = 0;
@@ -1520,13 +1518,23 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
 }
 
 void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
-			unsigned pfn, uint64_t addr, uint32_t flags)
+			unsigned pfn, struct ttm_mem_reg *mem,
+			unsigned npages, uint32_t flags)
 {
 	void __iomem *ptr = (void *)vm->pt;
+	uint64_t addr;
+	int i;
 
-	addr = addr & 0xFFFFFFFFFFFFF000ULL;
-	addr |= flags;
-	writeq(addr, ptr + (pfn * 8));
+	addr = flags = cayman_vm_page_flags(rdev, flags);
+
+	for (i = 0; i < npages; ++i, ++pfn) {
+		if (mem) {
+			addr = radeon_vm_get_addr(rdev, mem, i);
+			addr = addr & 0xFFFFFFFFFFFFF000ULL;
+			addr |= flags;
+		}
+		writeq(addr, ptr + (pfn * 8));
+	}
 }
 
 void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)

View file

@@ -1135,11 +1135,9 @@ struct radeon_asic {
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
-		uint32_t (*page_flags)(struct radeon_device *rdev,
-				       struct radeon_vm *vm,
-				       uint32_t flags);
 		void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
-				 unsigned pfn, uint64_t addr, uint32_t flags);
+				 unsigned pfn, struct ttm_mem_reg *mem,
+				 unsigned npages, uint32_t flags);
 	} vm;
 	/* ring specific callbacks */
 	struct {
@@ -1751,8 +1749,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_page_flags(rdev, v, flags) (rdev)->asic->vm.page_flags((rdev), (v), (flags))
-#define radeon_asic_vm_set_page(rdev, v, pfn, addr, flags) (rdev)->asic->vm.set_page((rdev), (v), (pfn), (addr), (flags))
+#define radeon_asic_vm_set_page(rdev, v, pfn, mem, npages, flags) (rdev)->asic->vm.set_page((rdev), (v), (pfn), (mem), (npages), (flags))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1837,6 +1834,9 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 void radeon_vm_fence(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_fence *fence);
+u64 radeon_vm_get_addr(struct radeon_device *rdev,
+		       struct ttm_mem_reg *mem,
+		       unsigned pfn);
 int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			    struct radeon_vm *vm,
 			    struct radeon_bo *bo,

View file

@@ -1375,7 +1375,6 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.page_flags = &cayman_vm_page_flags,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
@@ -1479,7 +1478,6 @@ static struct radeon_asic trinity_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.page_flags = &cayman_vm_page_flags,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
@@ -1583,7 +1581,6 @@ static struct radeon_asic si_asic = {
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.page_flags = &cayman_vm_page_flags,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {

View file

@@ -442,11 +442,10 @@ int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
 void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib);
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
-			      struct radeon_vm *vm,
-			      uint32_t flags);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
 void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
-			unsigned pfn, uint64_t addr, uint32_t flags);
+			unsigned pfn, struct ttm_mem_reg *mem,
+			unsigned npages, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 /* DCE6 - SI */
/* DCE6 - SI */

View file

@@ -450,7 +450,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
r = radeon_asic_vm_init(rdev);
if (r)
return r;
rdev->vm_manager.enabled = true;
r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
@@ -773,9 +773,9 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
  * to (cayman+).
  * Returns the physical address of the page.
  */
-static u64 radeon_vm_get_addr(struct radeon_device *rdev,
-			      struct ttm_mem_reg *mem,
-			      unsigned pfn)
+u64 radeon_vm_get_addr(struct radeon_device *rdev,
+		       struct ttm_mem_reg *mem,
+		       unsigned pfn)
 {
 	u64 addr = 0;
@@ -819,9 +819,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			    struct ttm_mem_reg *mem)
 {
 	struct radeon_bo_va *bo_va;
-	unsigned ngpu_pages, i;
-	uint64_t addr = 0, pfn;
-	uint32_t flags;
+	unsigned ngpu_pages;
+	uint64_t pfn;
 
 	/* nothing to do if vm isn't bound */
 	if (vm->sa_bo == NULL)
@@ -848,14 +847,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
 		}
 	}
-	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
-	flags = radeon_asic_vm_page_flags(rdev, bo_va->vm, bo_va->flags);
-	for (i = 0, addr = 0; i < ngpu_pages; i++) {
-		if (mem && bo_va->valid) {
-			addr = radeon_vm_get_addr(rdev, mem, i);
-		}
-		radeon_asic_vm_set_page(rdev, bo_va->vm, i + pfn, addr, flags);
-	}
+	if (!bo_va->valid) {
+		mem = NULL;
+	}
+	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+	radeon_asic_vm_set_page(rdev, bo_va->vm, pfn, mem, ngpu_pages, bo_va->flags);
 	radeon_fence_unref(&vm->last_flush);
 	return 0;
 }