Merge tag 'drm-intel-fixes-2017-01-05' of git://anongit.freedesktop.org/git/drm-intel
Pull i915 drm fixes from Jani Nikula:
 "Here's a bunch of drm/i915 fixes for v4.10-rc3. It includes GVT-g fixes.

  My new year's resolution is to start using signed tags for pulls. If
  that feels like a déjà vu, it's ((new year's) resolution), not
  (new (year's resolution))"

[ Taking this directly from Jani because Dave Airlie is only partially
  connected right now.  - Linus ]

* tag 'drm-intel-fixes-2017-01-05' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Prevent timeline updates whilst performing reset
  drm/i915: Silence allocation failure during sg_trim()
  drm/i915: Don't clflush before release phys object
  drm/i915: Fix oops in overlay due to frontbuffer tracking
  drm/i915: Fix oopses in the overlay code due to i915_gem_active stuff
  drm/i915: Initialize overlay->last_flip properly
  drm/i915: Move the min_pixclk[] handling to the end of readout
  drm/i915: Force VDD off on the new power seqeuencer before starting to use it
  drm/i915/gvt: fix typo in cfg_space range check
  drm/i915/gvt: fix an issue in emulating cfg space PCI_COMMAND
  drm/i915/gvt/kvmgt: trival: code cleanup
  drm/i915/gvt/kvmgt: prevent double-release of vgpu
  drm/i915/gvt/kvmgt: check returned slot for gfn
  drm/i915/gvt/kvmgt: dereference the pointer within lock
  drm/i915/gvt: reset the GGTT entry when vGPU created
  drm/i915/gvt: fix an error in opregion handling
commit ed40875dd4
11 changed files with 191 additions and 46 deletions
drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
         u8 changed = old ^ new;
         int ret;
 
+        memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
         if (!(changed & PCI_COMMAND_MEMORY))
                 return 0;
 
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
                         return ret;
         }
 
-        memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
         return 0;
 }
 
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
         if (WARN_ON(bytes > 4))
                 return -EINVAL;
 
-        if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+        if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
                 return -EINVAL;
 
         /* First check if it's PCI_COMMAND */
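Aside on the cfg_space range check above: a write of bytes bytes starting at offset into a buffer of size SZ touches bytes offset through offset + bytes - 1, so it is in bounds exactly when offset + bytes <= SZ. The old check rejected offset + bytes >= SZ, which spuriously refuses a legal write that ends flush with the end of config space; the one-character change fixes that. A minimal userspace sketch of the rule (illustrative constants and names, not the kernel code):

#include <stdio.h>

#define CFG_SPACE_SZ 256        /* stand-in for INTEL_GVT_MAX_CFG_SPACE_SZ */

/* In bounds iff the write ends at or before the end of the buffer. */
static int write_ok(unsigned int offset, unsigned int bytes)
{
        return offset + bytes <= CFG_SPACE_SZ;
}

int main(void)
{
        /* 4-byte write at offset 252 ends at byte 255: legal. */
        printf("252+4 -> %s\n", write_ok(252, 4) ? "ok" : "rejected");
        /* 4-byte write at offset 253 would touch byte 256: rejected. */
        printf("253+4 -> %s\n", write_ok(253, 4) ? "ok" : "rejected");
        return 0;
}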
drivers/gpu/drm/i915/gvt/gtt.c
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
         INIT_LIST_HEAD(&gtt->oos_page_list_head);
         INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
+        intel_vgpu_reset_ggtt(vgpu);
+
         ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
                         NULL, 1, 0);
         if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
         int ret;
+        void *page_addr;
 
         gvt_dbg_core("init gtt\n");
 
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                 return -ENODEV;
         }
 
+        gvt->gtt.scratch_ggtt_page =
+                alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+        if (!gvt->gtt.scratch_ggtt_page) {
+                gvt_err("fail to allocate scratch ggtt page\n");
+                return -ENOMEM;
+        }
+
+        page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+        gvt->gtt.scratch_ggtt_mfn =
+                intel_gvt_hypervisor_virt_to_mfn(page_addr);
+        if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+                gvt_err("fail to translate scratch ggtt page\n");
+                __free_page(gvt->gtt.scratch_ggtt_page);
+                return -EFAULT;
+        }
+
         if (enable_out_of_sync) {
                 ret = setup_spt_oos(gvt);
                 if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+        __free_page(gvt->gtt.scratch_ggtt_page);
+
         if (enable_out_of_sync)
                 clean_spt_oos(gvt);
 }
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+        struct intel_gvt *gvt = vgpu->gvt;
+        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+        u32 index;
+        u32 offset;
+        u32 num_entries;
+        struct intel_gvt_gtt_entry e;
+
+        memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+        e.type = GTT_TYPE_GGTT_PTE;
+        ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+        e.val64 |= _PAGE_PRESENT;
+
+        index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+        num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+        for (offset = 0; offset < num_entries; offset++)
+                ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+        index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+        num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+        for (offset = 0; offset < num_entries; offset++)
+                ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
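The GTT changes above hang together as follows: intel_gvt_init_gtt() now allocates one zero-filled scratch page and records its machine frame number, and intel_vgpu_reset_ggtt() points every aperture and hidden GGTT entry of a freshly created vGPU at that page, so no entry is left referencing whatever a previous owner had mapped. A rough userspace analogue of the idea (invented names, not the i915 code):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE   4096
#define NUM_ENTRIES 8

int main(void)
{
        /* One shared, zeroed scratch page backs every "unused" entry. */
        void *scratch = calloc(1, PAGE_SIZE);
        void *ggtt[NUM_ENTRIES];

        if (!scratch)
                return 1;

        /* Reset: every slot maps the scratch page, none is left dangling. */
        for (int i = 0; i < NUM_ENTRIES; i++)
                ggtt[i] = scratch;

        printf("entry 0 and entry 7 both map %p\n", ggtt[0]);
        printf("identical: %d\n", ggtt[0] == ggtt[7]);

        free(scratch);
        return 0;
}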
drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
         struct list_head oos_page_use_list_head;
         struct list_head oos_page_free_list_head;
         struct list_head mm_lru_list_head;
+
+        struct page *scratch_ggtt_page;
+        unsigned long scratch_ggtt_mfn;
 };
 
 enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
drivers/gpu/drm/i915/gvt/gvt.h
@@ -175,6 +175,7 @@ struct intel_vgpu {
                 struct notifier_block group_notifier;
                 struct kvm *kvm;
                 struct work_struct release_work;
+                atomic_t released;
         } vdev;
 #endif
 };
drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -114,12 +114,15 @@ static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
         struct gvt_dma *entry;
+        kvm_pfn_t pfn;
 
         mutex_lock(&vgpu->vdev.cache_lock);
-        entry = __gvt_cache_find(vgpu, gfn);
-        mutex_unlock(&vgpu->vdev.cache_lock);
 
-        return entry == NULL ? 0 : entry->pfn;
+        entry = __gvt_cache_find(vgpu, gfn);
+        pfn = (entry == NULL) ? 0 : entry->pfn;
+
+        mutex_unlock(&vgpu->vdev.cache_lock);
+        return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -497,7 +500,16 @@ static int intel_vgpu_open(struct mdev_device *mdev)
                 goto undo_iommu;
         }
 
-        return kvmgt_guest_init(mdev);
+        ret = kvmgt_guest_init(mdev);
+        if (ret)
+                goto undo_group;
+
+        atomic_set(&vgpu->vdev.released, 0);
+        return ret;
+
+undo_group:
+        vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY,
+                                        &vgpu->vdev.group_notifier);
 
 undo_iommu:
         vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
@@ -509,17 +521,26 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
         struct kvmgt_guest_info *info;
+        int ret;
 
         if (!handle_valid(vgpu->handle))
                 return;
 
-        vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+        if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+                return;
+
+        ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
                                         &vgpu->vdev.iommu_notifier);
-        vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+        WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+        ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
                                         &vgpu->vdev.group_notifier);
+        WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
         info = (struct kvmgt_guest_info *)vgpu->handle;
         kvmgt_guest_exit(info);
+
+        vgpu->vdev.kvm = NULL;
         vgpu->handle = 0;
 }
 
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 {
         struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
                                         vdev.release_work);
+
         __intel_vgpu_release(vgpu);
 }
 
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
 
         idx = srcu_read_lock(&kvm->srcu);
         slot = gfn_to_memslot(kvm, gfn);
+        if (!slot) {
+                srcu_read_unlock(&kvm->srcu, idx);
+                return -EINVAL;
+        }
 
         spin_lock(&kvm->mmu_lock);
 
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
 
         idx = srcu_read_lock(&kvm->srcu);
         slot = gfn_to_memslot(kvm, gfn);
+        if (!slot) {
+                srcu_read_unlock(&kvm->srcu, idx);
+                return -EINVAL;
+        }
 
         spin_lock(&kvm->mmu_lock);
 
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-        struct intel_vgpu *vgpu;
-
         if (!info) {
                 gvt_err("kvmgt_guest_info invalid\n");
                 return false;
         }
 
-        vgpu = info->vgpu;
-
         kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
         kvmgt_protect_table_destroy(info);
-        gvt_cache_destroy(vgpu);
+        gvt_cache_destroy(info->vgpu);
         vfree(info);
 
         return true;
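Two patterns are worth spelling out in the kvmgt hunks above: the release path can be entered both from the vfio release callback and from the deferred release work, so an atomic_cmpxchg() on the new released flag lets only the first caller run the teardown; and gvt_cache_find() now reads entry->pfn while still holding cache_lock, since the entry may be freed as soon as the lock is dropped. A minimal userspace sketch of both ideas (illustrative only, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int released;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *cache_entry;      /* may be freed by another thread */

/* Teardown runs at most once, whichever caller gets here first. */
static void release_once(void)
{
        int expected = 0;

        /* Only the caller that flips 0 -> 1 proceeds past this point. */
        if (!atomic_compare_exchange_strong(&released, &expected, 1))
                return;

        puts("tearing down exactly once");
}

/* Copy the value out while the lock still protects the entry. */
static unsigned long lookup(void)
{
        unsigned long val;

        pthread_mutex_lock(&cache_lock);
        val = cache_entry ? *cache_entry : 0;   /* dereference inside the lock */
        pthread_mutex_unlock(&cache_lock);

        return val;
}

int main(void)
{
        release_once();
        release_once();         /* second call is a no-op */
        printf("cache lookup: %lu\n", lookup());
        return 0;
}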
drivers/gpu/drm/i915/gvt/opregion.c
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
         int i, ret;
 
         for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-                mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+                mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
                                 + i * PAGE_SIZE);
                 if (mfn == INTEL_GVT_INVALID_ADDR) {
                         gvt_err("fail to get MFN from VA\n");
drivers/gpu/drm/i915/i915_gem.c
@@ -244,14 +244,16 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
 static void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
-                                struct sg_table *pages)
+                                struct sg_table *pages,
+                                bool needs_clflush)
 {
         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
         if (obj->mm.madv == I915_MADV_DONTNEED)
                 obj->mm.dirty = false;
 
-        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+        if (needs_clflush &&
+            (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
             !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                 drm_clflush_sg(pages);
 
@@ -263,7 +265,7 @@ static void
 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                                struct sg_table *pages)
 {
-        __i915_gem_object_release_shmem(obj, pages);
+        __i915_gem_object_release_shmem(obj, pages, false);
 
         if (obj->mm.dirty) {
                 struct address_space *mapping = obj->base.filp->f_mapping;
@@ -2231,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
         struct sgt_iter sgt_iter;
         struct page *page;
 
-        __i915_gem_object_release_shmem(obj, pages);
+        __i915_gem_object_release_shmem(obj, pages, true);
 
         i915_gem_gtt_finish_pages(obj, pages);
 
@@ -2322,7 +2324,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
         if (orig_st->nents == orig_st->orig_nents)
                 return;
 
-        if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+        if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
                 return;
 
         new_sg = new_st.sgl;
@@ -2728,6 +2730,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
         struct drm_i915_gem_request *request;
         struct i915_gem_context *incomplete_ctx;
         struct intel_timeline *timeline;
+        unsigned long flags;
         bool ring_hung;
 
         if (engine->irq_seqno_barrier)
@@ -2763,13 +2766,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
         if (i915_gem_context_is_default(incomplete_ctx))
                 return;
 
+        timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+
+        spin_lock_irqsave(&engine->timeline->lock, flags);
+        spin_lock(&timeline->lock);
+
         list_for_each_entry_continue(request, &engine->timeline->requests, link)
                 if (request->ctx == incomplete_ctx)
                         reset_request(request);
 
-        timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
         list_for_each_entry(request, &timeline->requests, link)
                 reset_request(request);
+
+        spin_unlock(&timeline->lock);
+        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
drivers/gpu/drm/i915/i915_gem_request.h
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
         rcu_assign_pointer(active->request, request);
 }
 
+/**
+ * i915_gem_active_set_retire_fn - updates the retirement callback
+ * @active - the active tracker
+ * @fn - the routine called when the request is retired
+ * @mutex - struct_mutex used to guard retirements
+ *
+ * i915_gem_active_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_gem_active_set_retire_fn(struct i915_gem_active *active,
+                              i915_gem_retire_fn fn,
+                              struct mutex *mutex)
+{
+        lockdep_assert_held(mutex);
+        active->retire = fn ?: i915_gem_retire_noop;
+}
+
 static inline struct drm_i915_gem_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
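One small C detail in the new helper above: fn ?: i915_gem_retire_noop uses the GNU ?: extension, where a ?: b evaluates to a when a is non-zero and to b otherwise, without evaluating a twice, so a NULL retire callback quietly falls back to the no-op. A toy example of the construct (gcc/clang extension, invented names):

#include <stdio.h>

typedef void (*retire_fn)(void);

static void noop(void) { }
static void real_retire(void) { puts("retire callback ran"); }

int main(void)
{
        retire_fn fn = NULL;

        /* GNU extension: "fn ?: noop" is fn when non-NULL, else noop. */
        retire_fn chosen = fn ?: noop;
        chosen();               /* silently does nothing */

        fn = real_retire;
        chosen = fn ?: noop;
        chosen();               /* prints the message */

        return 0;
}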
drivers/gpu/drm/i915/intel_display.c
@@ -16791,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
         for_each_intel_crtc(dev, crtc) {
                 struct intel_crtc_state *crtc_state = crtc->config;
-                int pixclk = 0;
 
                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                 memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16803,23 +16802,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                 crtc->base.enabled = crtc_state->base.enable;
                 crtc->active = crtc_state->base.active;
 
-                if (crtc_state->base.active) {
+                if (crtc_state->base.active)
                         dev_priv->active_crtcs |= 1 << crtc->pipe;
 
-                        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-                                pixclk = ilk_pipe_pixel_rate(crtc_state);
-                        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-                                pixclk = crtc_state->base.adjusted_mode.crtc_clock;
-                        else
-                                WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
-                        /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-                        if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-                                pixclk = DIV_ROUND_UP(pixclk * 100, 95);
-                }
-
-                dev_priv->min_pixclk[crtc->pipe] = pixclk;
-
                 readout_plane_state(crtc);
 
                 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16892,6 +16877,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
         }
 
         for_each_intel_crtc(dev, crtc) {
+                int pixclk = 0;
+
                 crtc->base.hwmode = crtc->config->base.adjusted_mode;
 
                 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16919,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                          */
                         crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
 
+                        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+                                pixclk = ilk_pipe_pixel_rate(crtc->config);
+                        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                                pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+                        else
+                                WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+                        /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+                        if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+                                pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+
                         drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
                         update_scanline_offset(crtc);
                 }
 
+                dev_priv->min_pixclk[crtc->pipe] = pixclk;
+
                 intel_pipe_config_sanity_check(dev_priv, crtc->config);
         }
 }
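On the min_pixclk bookkeeping that the display hunks move to the end of readout: with IPS enabled on Broadwell the pixel rate must stay within 95% of cdclk, so the clock requirement is scaled up by 100/95 with a rounding-up division, which is exactly what DIV_ROUND_UP(pixclk * 100, 95) computes. A quick standalone check of that arithmetic (example figure only):

#include <stdio.h>

/* Same rounding-up division the kernel macro performs. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long pixclk = 533250;  /* kHz, an arbitrary example */

        /* Pixel rate may be at most 95% of cdclk, so scale by 100/95. */
        unsigned long adjusted = DIV_ROUND_UP(pixclk * 100, 95);

        printf("pixclk %lu kHz -> adjusted requirement %lu kHz\n",
               pixclk, adjusted);
        return 0;
}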
drivers/gpu/drm/i915/intel_dp.c
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                     struct intel_dp *intel_dp);
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-                                              struct intel_dp *intel_dp);
+                                              struct intel_dp *intel_dp,
+                                              bool force_disable_vdd);
 static void
 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
 
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 
         /* init power sequencer on this pipe and port */
         intel_dp_init_panel_power_sequencer(dev, intel_dp);
-        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 
         /*
          * Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
          * Only the HW needs to be reprogrammed, the SW state is fixed and
          * has been setup during connector init.
          */
-        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 
         return 0;
 }
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
                       port_name(port), pipe_name(intel_dp->pps_pipe));
 
         intel_dp_init_panel_power_sequencer(dev, intel_dp);
-        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 }
 
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
 
         /* init power sequencer on this pipe and port */
         intel_dp_init_panel_power_sequencer(dev, intel_dp);
-        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 }
 
 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-                                              struct intel_dp *intel_dp)
+                                              struct intel_dp *intel_dp,
+                                              bool force_disable_vdd)
 {
         struct drm_i915_private *dev_priv = to_i915(dev);
         u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
         intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
+        /*
+         * On some VLV machines the BIOS can leave the VDD
+         * enabled even on power seqeuencers which aren't
+         * hooked up to any port. This would mess up the
+         * power domain tracking the first time we pick
+         * one of these power sequencers for use since
+         * edp_panel_vdd_on() would notice that the VDD was
+         * already on and therefore wouldn't grab the power
+         * domain reference. Disable VDD first to avoid this.
+         * This also avoids spuriously turning the VDD on as
+         * soon as the new power seqeuencer gets initialized.
+         */
+        if (force_disable_vdd) {
+                u32 pp = ironlake_get_pp_control(intel_dp);
+
+                WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+
+                if (pp & EDP_FORCE_VDD)
+                        DRM_DEBUG_KMS("VDD already on, disabling first\n");
+
+                pp &= ~EDP_FORCE_VDD;
+
+                I915_WRITE(regs.pp_ctrl, pp);
+        }
+
         pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
         pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
                 vlv_initial_power_sequencer_setup(intel_dp);
         } else {
                 intel_dp_init_panel_power_sequencer(dev, intel_dp);
-                intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+                intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
         }
 }
 
drivers/gpu/drm/i915/intel_overlay.c
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
 {
         GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
                                         &overlay->i915->drm.struct_mutex));
-        overlay->last_flip.retire = retire;
+        i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+                                      &overlay->i915->drm.struct_mutex);
         i915_gem_active_set(&overlay->last_flip, req);
         i915_add_request(req);
 }
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
         if (ret)
                 goto out_unpin;
 
-        i915_gem_track_fb(overlay->vma->obj, new_bo,
-                          INTEL_FRONTBUFFER_OVERLAY(pipe));
+        i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+                          vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
         overlay->old_vma = overlay->vma;
         overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
         overlay->contrast = 75;
         overlay->saturation = 146;
 
+        init_request_active(&overlay->last_flip, NULL);
+
         regs = intel_overlay_map_regs(overlay);
         if (!regs)
                 goto out_unpin_bo;