drm/i915/execlist: Trim immediate timeslice expiry

We perform timeslicing immediately upon receipt of a request that may be
put into the second ELSP slot. The idea behind this was that, since we
did not install the timer while the second ELSP slot was empty, we had
no idea how long ELSP[0] had already been running, and so giving the
newcomer an immediate chance on the GPU seemed fair. However, this
causes extra busy work that we may be able to avoid if we instead wait
a jiffie for the first timeslice, as normal.
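
To make the behavioural change concrete: with the old !timer_pending() check
in execlists_dequeue(), a timeslice timer that was never armed (because
ELSP[1] had been empty) is indistinguishable from one that has fired, so a
newcomer triggers an immediate timeslice; the new timer_expired() helper only
passes once an armed timer has actually elapsed. The standalone sketch below
is illustrative only: mock_timer and its fields are simplified stand-ins for
the kernel's struct timer_list, not the real API, though the timer_expired()
predicate mirrors the helper this patch adds to i915_gem.h.

/* Illustrative userspace mock, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct mock_timer {
	unsigned long expires;	/* 0 == never armed (or cancelled) */
	bool pending;		/* armed and not yet fired */
};

static bool timer_pending(const struct mock_timer *t)
{
	return t->pending;
}

/* Mirrors the helper added to i915_gem.h by this patch. */
static bool timer_expired(const struct mock_timer *t)
{
	return t->expires && !timer_pending(t);
}

int main(void)
{
	struct mock_timer never_armed = { 0, false };	/* ELSP[1] was empty */
	struct mock_timer armed       = { 100, true };	/* timeslice running */
	struct mock_timer fired       = { 100, false };	/* jiffie elapsed */

	/* Old check: a never-armed timer also reads as "slice over". */
	printf("!timer_pending: never_armed=%d armed=%d fired=%d\n",
	       !timer_pending(&never_armed),
	       !timer_pending(&armed),
	       !timer_pending(&fired));		/* 1 0 1 */

	/* New check: only an armed-and-elapsed timer allows a timeslice. */
	printf("timer_expired:  never_armed=%d armed=%d fired=%d\n",
	       timer_expired(&never_armed),
	       timer_expired(&armed),
	       timer_expired(&fired));		/* 0 0 1 */

	return 0;
}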

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191016100851.4979-1-chris@chris-wilson.co.uk
Author: Chris Wilson
Date:   2019-10-16 11:08:51 +01:00
parent 8574685547
commit 2229adc813
3 changed files with 39 additions and 11 deletions

@@ -1241,6 +1241,17 @@ static struct intel_timeline *get_timeline(struct i915_request *rq)
 	return tl;
 }
 
+static const char *repr_timer(const struct timer_list *t)
+{
+	if (!READ_ONCE(t->expires))
+		return "inactive";
+
+	if (timer_pending(t))
+		return "active";
+
+	return "expired";
+}
+
 static void intel_engine_print_registers(struct intel_engine_cs *engine,
 					 struct drm_printer *m)
 {
@@ -1302,19 +1313,20 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 		unsigned int idx;
 		u8 read, write;
 
-		drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
-			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
-			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
-			   num_entries);
+		drm_printf(m, "\tExeclist tasklet queued? %s (%s), timeslice? %s\n",
+			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
+			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+			   repr_timer(&engine->execlists.timer));
 
 		read = execlists->csb_head;
 		write = READ_ONCE(*execlists->csb_write);
 
-		drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
-			   read, write,
-			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
-			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
+		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+			   read, write, num_entries);
+
 		if (read >= num_entries)
 			read = 0;
 		if (write >= num_entries)

@@ -1473,7 +1473,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
-			   !timer_pending(&engine->execlists.timer)) {
+			   timer_expired(&engine->execlists.timer)) {
 			GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
 				  engine->name,
 				  last->fence.context,
@@ -1932,6 +1932,8 @@ static void process_csb(struct intel_engine_cs *engine)
 
 			if (enable_timeslice(execlists))
 				mod_timer(&execlists->timer, jiffies + 1);
+			else
+				cancel_timer(&execlists->timer);
 
 			WRITE_ONCE(execlists->pending[0], NULL);
 		} else {
@@ -3452,7 +3454,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 
 static void execlists_park(struct intel_engine_cs *engine)
 {
-	del_timer(&engine->execlists.timer);
+	cancel_timer(&engine->execlists.timer);
 }
 
 void intel_execlists_set_default_submission(struct intel_engine_cs *engine)

@@ -112,4 +112,18 @@ static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
 	return test_bit(TASKLET_STATE_SCHED, &t->state);
 }
 
+static inline void cancel_timer(struct timer_list *t)
+{
+	if (!READ_ONCE(t->expires))
+		return;
+
+	del_timer(t);
+	WRITE_ONCE(t->expires, 0);
+}
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+	return READ_ONCE(t->expires) && !timer_pending(t);
+}
+
 #endif /* __I915_GEM_H__ */
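
A note on how the helpers fit together, as far as can be inferred from this
diff: cancel_timer() clears t->expires after del_timer(), and timer_expired()
only returns true while t->expires is still set. A timeslice timer that was
parked or never armed therefore reads as "inactive" in repr_timer() and never
satisfies the timer_expired() check in execlists_dequeue(), whereas a timer
armed by process_csb() that has since fired reads as "expired" and does.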