diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b21c20ee9e23998bb4e1f81b93dd74c836c6f2f0..d4fd56f1e65eaf46b9902ee06a1dd0726e386c1b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 
 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+bool intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0c4c07072473ff9eccd62421aa65b8742b0cb543..5309c61ad5272ca23dad658b606a1d327c5bcc8f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1079,9 +1079,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	return idle;
 }
 
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+bool intel_engine_flush_submission(struct intel_engine_cs *engine)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
+	bool active = tasklet_is_locked(t);
 
 	if (__tasklet_is_scheduled(t)) {
 		local_bh_disable();
@@ -1092,10 +1093,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
 			tasklet_unlock(t);
 		}
 		local_bh_enable();
+		active = true;
 	}
 
 	/* Otherwise flush the tasklet if it was running on another cpu */
 	tasklet_unlock_wait(t);
+
+	return active;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 063f863ee21c185669dd2a1802996de6d4cb2460..b4f04614230e4228cfa47373abfec91289d3e680 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -23,15 +23,18 @@ static void retire_requests(struct intel_timeline *tl)
 			break;
 }
 
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	bool active = false;
 
 	for_each_engine(engine, gt, id) {
-		intel_engine_flush_submission(engine);
-		flush_work(&engine->retire_work);
+		active |= intel_engine_flush_submission(engine);
+		active |= flush_work(&engine->retire_work);
 	}
+
+	return active;
 }
 
 static void engine_retire(struct work_struct *work)
@@ -120,9 +123,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-		active_count++; /* report busy to caller, try again? */
-		if (!mutex_trylock(&tl->mutex))
+		if (!mutex_trylock(&tl->mutex)) {
+			active_count++; /* report busy to caller, try again? */
 			continue;
+		}
 
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));
@@ -147,10 +151,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
-		if (atomic_dec_and_test(&tl->active_count)) {
+		if (atomic_dec_and_test(&tl->active_count))
 			list_del(&tl->link);
-			active_count--;
-		}
+		else
+			active_count += i915_active_fence_isset(&tl->last_request);
 
 		mutex_unlock(&tl->mutex);
 
@@ -165,7 +169,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	flush_submission(gt);
+	if (flush_submission(gt))
+		active_count++;
 
 	return active_count ? timeout : 0;
 }
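
For context, the point of propagating activity back out of flush_submission() is that intel_gt_retire_requests_timeout() now only reports busy (by returning the remaining timeout) while there is genuinely outstanding work: a still-locked or still-scheduled submission tasklet, a pending retire worker, a contended timeline mutex, or a timeline whose last request fence is still set. A waiter can then poll it until the GT goes idle. A minimal sketch of such a caller, in the style of intel_gt_wait_for_idle(); the function name and the surrounding checks here are illustrative, not part of this patch:

	static long wait_until_idle_example(struct intel_gt *gt, long timeout)
	{
		/*
		 * intel_gt_retire_requests_timeout() returns the remaining
		 * timeout while any timeline, tasklet or retire worker was
		 * found active, and 0 once everything has been retired.
		 */
		while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
			cond_resched();
			if (signal_pending(current))
				return -EINTR;
		}

		return timeout;
	}

Because flush_submission() feeds into active_count, a caller looping like this no longer sees a premature "idle" report while a tasklet is still running on another CPU, nor a perpetual "busy" report from timelines that merely remained on the active_list without an outstanding request.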