drm/vmwgfx: Add DX query support. Various fixes.

Add support for vgpu10 queries, along with functional and formatting fixes.

Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Author: Sinclair Yeh, 2015-08-10 10:56:15 -07:00; committed by Thomas Hellstrom
parent 0fca749e9a
commit fd11a3c0bd
5 changed files with 373 additions and 26 deletions
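
For orientation, here is an editorial sketch (not part of the patch itself) of how the pieces added in the hunks below fit together; every function and command name is taken from the diff that follows.

/*
 * Sketch of the vgpu10 (DX) query flow introduced by this commit.
 *
 * 1. Validation: SVGA_3D_CMD_DX_DEFINE_QUERY is handled by
 *    vmw_cmd_dx_define_query(), which notifies the DX query cotable of the
 *    new query id. SVGA_3D_CMD_DX_BIND_QUERY is handled by
 *    vmw_cmd_dx_bind_query(), which translates the user-mode MOB id and
 *    records the MOB in sw_context->dx_query_mob / dx_query_ctx.
 *
 * 2. Commit: on successful submission, vmw_resources_unreserve() calls
 *    vmw_context_bind_dx_query(), so the context takes a reference on the
 *    query MOB and the MOB's dx_query_ctx back-pointer is set.
 *
 * 3. Readback: vmw_dx_context_unbind() and vmw_query_move_notify() use
 *    vmw_query_readback_all() to emit SVGA_3D_CMD_DX_READBACK_ALL_QUERY
 *    and clear the back-pointer before the MOB leaves device memory.
 *
 * 4. Rebind: vmw_rebind_contexts() calls vmw_rebind_all_dx_query(), which
 *    re-emits SVGA_3D_CMD_DX_BIND_ALL_QUERY for a query MOB whose
 *    back-pointer was cleared by an earlier readback.
 */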


@@ -817,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
/**
* vmw_move_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
* (currently only resources).
@@ -828,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
vmw_resource_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
}
/**
* vmw_swap_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to be swapped out.
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{


@@ -121,7 +121,9 @@ static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
res = uctx->cotables[i];
uctx->cotables[i] = NULL;
spin_unlock(&uctx->cotable_lock);
- vmw_resource_unreference(&res);
+ if (res)
+	vmw_resource_unreference(&res);
}
}
@@ -585,6 +587,8 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
struct {
SVGA3dCmdHeader header;
@@ -603,6 +607,13 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_context_scrub_cotables(res, readback);
if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
readback) {
WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
if (vmw_query_readback_all(uctx->dx_query_mob))
DRM_ERROR("Failed to read back query states\n");
}
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
@@ -692,6 +703,9 @@ static void vmw_user_context_free(struct vmw_resource *res)
if (ctx->cbs)
vmw_binding_state_free(ctx->cbs);
(void) vmw_context_bind_dx_query(res, NULL);
ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
@@ -867,3 +881,57 @@ vmw_context_binding_state(struct vmw_resource *ctx)
{
return container_of(ctx, struct vmw_user_context, res)->cbs;
}
/**
* vmw_context_bind_dx_query - Set the query MOB for the context
*
* If @mob is NULL, then this function will remove the association between
* the MOB and the context. This function assumes the binding_mutex is held.
*
* @ctx_res: The context resource
* @mob: a reference to the query MOB
*
* Returns -EINVAL if a MOB has already been set and does not match the one
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_dma_buffer *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
if (mob == NULL) {
if (uctx->dx_query_mob) {
uctx->dx_query_mob->dx_query_ctx = NULL;
vmw_dmabuf_unreference(&uctx->dx_query_mob);
uctx->dx_query_mob = NULL;
}
return 0;
}
/* Can only have one MOB per context for queries */
if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
return -EINVAL;
mob->dx_query_ctx = ctx_res;
if (!uctx->dx_query_mob)
uctx->dx_query_mob = vmw_dmabuf_reference(mob);
return 0;
}
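
For illustration only, a minimal, hypothetical caller of the function above; it is not part of the patch and only restates the locking and reference rules documented in the kernel-doc comment:

/* Hypothetical example: bind a query MOB to a DX context. */
static int example_set_query_mob(struct vmw_private *dev_priv,
				 struct vmw_resource *ctx_res,
				 struct vmw_dma_buffer *mob)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	/* Takes a reference on @mob; fails with -EINVAL if a different
	 * MOB is already bound to the context. Passing NULL instead
	 * drops the reference and clears the association. */
	ret = vmw_context_bind_dx_query(ctx_res, mob);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;
}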
/**
* vmw_context_get_dx_query_mob - Return a non-counted reference to the DX query MOB
*
* @ctx_res: The context resource
*/
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
return uctx->dx_query_mob;
}


@@ -88,6 +88,8 @@ struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
};
/**
@@ -658,6 +660,9 @@ extern void vmw_resource_unreserve(struct vmw_resource *res,
unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -1011,6 +1016,11 @@ extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
/*
* Surface management - vmwgfx_surface.c


@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -101,19 +101,32 @@ struct vmw_cmd_entry {
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
* command submission.
*
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
+ * @backoff: Whether command submission failed.
*/
- static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context,
- 				struct list_head *list,
- 				bool backoff)
+ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+ 				bool backoff)
{
struct vmw_resource_val_node *val;
struct list_head *list = &sw_context->resource_list;
if (sw_context->dx_query_mob && !backoff)
vmw_context_bind_dx_query(sw_context->dx_query_ctx,
sw_context->dx_query_mob);
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
@@ -376,6 +389,16 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
break;
}
if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
struct vmw_dma_buffer *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
ret = vmw_bo_to_validate_list(sw_context,
dx_query_mob,
true, NULL);
}
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
@@ -533,7 +556,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
- int ret;
+ int ret = 0;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
@@ -554,7 +577,18 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
}
- return 0;
if (sw_context->dx_query_mob) {
struct vmw_dma_buffer *expected_dx_query_mob;
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
if (expected_dx_query_mob &&
expected_dx_query_mob != sw_context->dx_query_mob) {
ret = -EINVAL;
}
}
return ret;
}
/**
@@ -724,6 +758,46 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return ret;
}
/**
* vmw_rebind_all_dx_query - Rebind DX query associated with the context
*
* @ctx_res: context the query belongs to
*
* This function assumes binding_mutex is held.
*/
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
struct vmw_dma_buffer *dx_query_mob;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindAllQuery body;
} *cmd;
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
if (cmd == NULL) {
DRM_ERROR("Failed to rebind queries.\n");
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.mem.start;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
return 0;
}
/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
@@ -748,6 +822,10 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
ret = vmw_rebind_all_dx_query(val->res);
if (ret != 0)
return ret;
}
return 0;
@@ -1248,6 +1326,98 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return ret;
}
/**
* vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*
* This function adds the new query into the query COTABLE.
*/
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dx_define_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDXDefineQuery q;
} *cmd;
int ret;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *cotable_res;
if (ctx_node == NULL) {
DRM_ERROR("DX Context not set for query.\n");
return -EINVAL;
}
cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
vmw_resource_unreference(&cotable_res);
return ret;
}
/**
* vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*
* The query bind operation will eventually associate the query ID
* with its backing MOB. In this function, we take the user mode
* MOB ID and use vmw_translate_mob_ptr() to translate it to its
* kernel mode equivalent.
*/
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dx_bind_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindQuery q;
} *cmd;
struct vmw_dma_buffer *vmw_bo;
int ret;
cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
/*
* Look up the buffer pointed to by q.mobid, put it on the relocation
* list so its kernel mode MOB ID can be filled in later
*/
ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
&vmw_bo);
if (ret != 0)
return ret;
sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
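
An editorial note on the hunks in this file, for readers following the bind path: the association recorded here is deferred rather than applied immediately.

/*
 * vmw_cmd_dx_bind_query() only records the MOB/context pair in the
 * software context. The binding is then checked and committed later:
 *
 *   vmw_resources_reserve()   - fails with -EINVAL if the context already
 *                               has a different query MOB bound.
 *   vmw_resources_unreserve() - on success (backoff == false), calls
 *                               vmw_context_bind_dx_query() to take the
 *                               reference and set the back-pointer.
 */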
/**
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
@@ -2975,6 +3145,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
@@ -3097,15 +3269,17 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_cid_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
&vmw_cmd_dx_cid_check, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+ &vmw_cmd_ok, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
true, false, true),
@@ -3780,6 +3954,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
sw_context->dx_ctx_node = NULL;
sw_context->dx_query_mob = NULL;
sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
@@ -3803,7 +3979,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
/*
* Merge the resource lists before checking the return status
* from vmw_cmd_check_all so that all the open hashtabs will
@@ -3869,8 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
- false);
+ vmw_resources_unreserve(sw_context, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
@@ -3908,8 +4082,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
- vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
- true);
+ vmw_resources_unreserve(sw_context, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);


@@ -1451,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
/**
* vmw_resource_move_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
*
* Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory.
@@ -1503,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
}
}
/**
* vmw_query_readback_all - Read back cached query states
*
* @dx_query_mob: Buffer containing the DX query states
*
* Read back cached states from the device if they exist. This function
* assumes binding_mutex is held.
*/
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXReadbackAllQuery body;
} *cmd;
/* No query bound, so do nothing */
if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
return 0;
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for "
"query MOB read back.\n");
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = dx_query_ctx->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
/* Triggers a rebind the next time the affected context is bound */
dx_query_mob->dx_query_ctx = NULL;
return 0;
}
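
A short editorial note on how this readback ties into the rebind path added in the command-submission code:

/*
 * Clearing dx_query_ctx above is what arms the rebind path: when the
 * context is later rebound, vmw_rebind_all_dx_query() finds a query MOB
 * with a NULL dx_query_ctx, emits SVGA_3D_CMD_DX_BIND_ALL_QUERY and
 * restores the association through vmw_context_bind_dx_query().
 */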
/**
* vmw_query_move_notify - Read back cached query states
*
* @bo: The TTM buffer object about to move.
* @mem: The memory region @bo is moving to.
*
* Called before the query MOB is swapped out to read back cached query
* states from the device.
*/
void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
struct vmw_dma_buffer *dx_query_mob;
struct ttm_bo_device *bdev = bo->bdev;
struct vmw_private *dev_priv;
dev_priv = container_of(bdev, struct vmw_private, bdev);
mutex_lock(&dev_priv->binding_mutex);
dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
}
/* If BO is being moved from MOB to system memory */
if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
struct vmw_fence_obj *fence;
(void) vmw_query_readback_all(dx_query_mob);
mutex_unlock(&dev_priv->binding_mutex);
/* Create a fence and attach the BO to it */
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
vmw_fence_single_bo(bo, fence);
if (fence != NULL)
vmw_fence_obj_unreference(&fence);
(void) ttm_bo_wait(bo, false, false, false);
} else
mutex_unlock(&dev_priv->binding_mutex);
}
/**
* vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
*