drm/amdgpu: switch the GTT backend to self alloc

Similar to the TTM range manager.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-6-christian.koenig@amd.com
Author: Christian König
Date:   2021-04-23 18:51:46 +02:00
parent 2fdcb55dfc
commit f700b18c85
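
The change mirrors the TTM range manager itself: instead of pointing the
resource at a separately allocated drm_mm_node, amdgpu_gtt_node embeds a
ttm_range_mgr_node whose trailing flexible array carries the drm_mm nodes,
so the resource and its node come from a single allocation. A minimal
sketch of the resulting layout, assuming the ttm_range_mgr_node definition
this series introduces in include/drm/ttm/ttm_range_manager.h:

	/* Resource and range nodes share one allocation. */
	struct ttm_range_mgr_node {
		struct ttm_resource base;
		struct drm_mm_node mm_nodes[];
	};

	/* 'base' must stay the last member because of the flexible
	 * array; one trailing node is enough for GTT, hence the
	 * struct_size(node, base.mm_nodes, 1) allocation below. */
	struct amdgpu_gtt_node {
		struct ttm_buffer_object *tbo;
		struct ttm_range_mgr_node base;
	};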

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c

@@ -22,11 +22,13 @@
  * Authors: Christian König
  */
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "amdgpu.h"
 
 struct amdgpu_gtt_node {
-	struct drm_mm_node node;
 	struct ttm_buffer_object *tbo;
+	struct ttm_range_mgr_node base;
 };
 
 static inline struct amdgpu_gtt_mgr *
@@ -38,7 +40,8 @@ to_gtt_mgr(struct ttm_resource_manager *man)
 static inline struct amdgpu_gtt_node *
 to_amdgpu_gtt_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct amdgpu_gtt_node, node);
+	return container_of(res->mm_node, struct amdgpu_gtt_node,
+			    base.mm_nodes[0]);
 }
 
 /**
@@ -107,7 +110,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 {
 	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
 
-	return drm_mm_node_allocated(&node->node);
+	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
 
 /**
@@ -138,16 +141,19 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
 
 	node->tbo = tbo;
+	ttm_resource_init(tbo, place, &node->base.base);
+
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
-		r = drm_mm_insert_node_in_range(&mgr->mm, &node->node,
+		r = drm_mm_insert_node_in_range(&mgr->mm,
+						&node->base.mm_nodes[0],
 						mem->num_pages,
 						tbo->page_alignment, 0,
 						place->fpfn, place->lpfn,
@@ -156,14 +162,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		if (unlikely(r))
 			goto err_free;
 
-		mem->start = node->node.start;
+		mem->start = node->base.mm_nodes[0].start;
 	} else {
-		node->node.start = 0;
-		node->node.size = mem->num_pages;
+		node->base.mm_nodes[0].start = 0;
+		node->base.mm_nodes[0].size = mem->num_pages;
 		mem->start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	mem->mm_node = &node->node;
+	mem->mm_node = &node->base.mm_nodes[0];
 	return 0;
 
 err_free:
@@ -186,15 +192,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 			       struct ttm_resource *mem)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	struct amdgpu_gtt_node *node;
 
-	if (!node)
+	if (!mem->mm_node)
 		return;
 
+	node = to_amdgpu_gtt_node(mem);
+
 	spin_lock(&mgr->lock);
-	if (drm_mm_node_allocated(&node->node))
-		drm_mm_remove_node(&node->node);
+	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 	atomic64_add(mem->num_pages, &mgr->available);
 
@@ -232,7 +240,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
 		r = amdgpu_ttm_recover_gart(node->tbo);
 		if (r)
 			break;
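
For readers without the kernel tree at hand, the same pattern can be shown
standalone. The following is plain userspace C with hypothetical stand-in
types (mm_node, range_node and gtt_node are illustrations, not the kernel
structures); like the kernel, it relies on the GCC extension that allows
embedding a struct whose last member is a flexible array:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mm_node {			/* stand-in for drm_mm_node */
		unsigned long start, size;
	};

	struct range_node {			/* stand-in for ttm_range_mgr_node */
		int resource;			/* stand-in for struct ttm_resource */
		struct mm_node mm_nodes[];	/* flexible array member */
	};

	struct gtt_node {			/* stand-in for amdgpu_gtt_node */
		void *tbo;
		struct range_node base;		/* flexible array: keep last */
	};

	int main(void)
	{
		/* Single allocation covering the node plus one trailing
		 * mm_node, analogous to struct_size(node, base.mm_nodes, 1). */
		struct gtt_node *node =
			calloc(1, offsetof(struct gtt_node, base.mm_nodes[1]));
		struct mm_node *mm;

		if (!node)
			return 1;

		node->base.mm_nodes[0].start = 0;
		node->base.mm_nodes[0].size = 16;

		/* Given only the embedded mm_node, walk back to the
		 * container, as to_amdgpu_gtt_node() does. */
		mm = &node->base.mm_nodes[0];
		printf("size=%lu\n",
		       container_of(mm, struct gtt_node,
				    base.mm_nodes[0])->base.mm_nodes[0].size);

		free(node);
		return 0;
	}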