From 6594094f819e0020e926e137e47e2edb97ba500b Mon Sep 17 00:00:00 2001
From: Yan Zhao
Date: Thu, 10 Sep 2020 11:54:05 +0800
Subject: [PATCH 1/6] drm/i915/gvt: correct a false comment of flag F_UNALIGN

Correct the comment of flag F_UNALIGN, which was falsely replaced when
flag F_CMD_ACCESSED was removed.

Fixes: a6c5817a38cf ("drm/i915/gvt: remove flag F_CMD_ACCESSED")
Reviewed-by: Zhenyu Wang
Signed-off-by: Yan Zhao
Signed-off-by: Zhenyu Wang
Link: http://patchwork.freedesktop.org/patch/msgid/20200910035405.20273-1-yan.y.zhao@intel.com
---
 drivers/gpu/drm/i915/gvt/gvt.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9831361f181eb..a81cf0f01e78e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -255,7 +255,7 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESS (1 << 3)
 /* This reg has been accessed by a VM */
 #define F_ACCESSED (1 << 4)
-/* This reg has been accessed through GPU commands */
+/* This reg could be accessed by unaligned address */
 #define F_UNALIGN (1 << 6)
 /* This reg is in GVT's mmio save-restor list and in hardware
  * logical context image

From 5f60b12edcd0c2e83650a6f9aa4a969bd9fc5732 Mon Sep 17 00:00:00 2001
From: Colin Xu
Date: Tue, 27 Oct 2020 12:53:08 +0800
Subject: [PATCH 2/6] drm/i915/gvt: Save/restore HW status to support GVT suspend/resume

This patch saves/restores the necessary GVT info during i915
suspend/resume so that a GVT-enabled QEMU VM can continue running
afterwards.

Only GGTT and fence regs are saved/restored now. GVT saves the GGTT
entries on each host_entry update, then restores the saved dirty entries
and re-initializes the fence regs in the resume routine.

V2:
- Change kzalloc/kfree to vzalloc/vfree since the space allocated from
  kmalloc may not be enough for all saved GGTT entries.
- Keep the gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
  move the actual implementation to gvt.h/gvt.c. (zhenyu)
- Check gvt config on and active with intel_gvt_active(). (zhenyu)

V3: (zhenyu)
- Incorrect copy length. Should be num entries * entry size.
- Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
- Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.

V4: Rebase.

V5: Fail intel_gvt_save_ggtt with -ENOMEM if it fails to allocate memory
    to save the ggtt. Free the allocated ggtt_entries on failure.

V6: Save the host entry to the per-vGPU gtt.ggtt_mm on each host_entry
    update.

V7: Restore GGTT entries based on the present bit. Split fence restore
    and mmio restore into different functions.
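Note: as an illustration of the scheme described above, here is a small
stand-alone C sketch of the bookkeeping only. The array sizes, names and
the PRESENT bit are made up for the example and are not the driver's; the
real code mirrors entries into ggtt_mm.host_ggtt_aperture/host_ggtt_hidden
and writes them back through the GGTT in MMIO space.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PTE_PRESENT 0x1ULL /* stand-in for _PAGE_PRESENT */
#define NUM_PTES 8         /* stand-in for vgpu_aperture_sz() >> PAGE_SHIFT */

static uint64_t hw_ggtt[NUM_PTES];    /* models the GGTT in MMIO space */
static uint64_t saved_ggtt[NUM_PTES]; /* models the per-vGPU save array */

/* Mirror every host entry update into the per-vGPU save array. */
static void set_host_entry(unsigned int idx, uint64_t pte)
{
	saved_ggtt[idx] = pte;
	hw_ggtt[idx] = pte;
}

/* Resume: rewrite only the entries that were actually populated. */
static void restore_ggtt(void)
{
	for (unsigned int i = 0; i < NUM_PTES; i++)
		if (saved_ggtt[i] & PTE_PRESENT)
			hw_ggtt[i] = saved_ggtt[i];
}

int main(void)
{
	set_host_entry(3, 0xabc000 | PTE_PRESENT);
	memset(hw_ggtt, 0, sizeof(hw_ggtt)); /* suspend/resume loses HW state */
	restore_ggtt();
	printf("entry 3 after resume: %#llx\n", (unsigned long long)hw_ggtt[3]);
	return 0;
}

Saving on each update (rather than walking the whole GGTT at suspend time)
keeps the resume path a simple replay of already-collected entries.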
Reviewed-by: Zhenyu Wang Signed-off-by: Hang Yuan Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201027045308.158955-1-colin.xu@intel.com --- drivers/gpu/drm/i915/gvt/gtt.c | 64 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/gtt.h | 4 ++ drivers/gpu/drm/i915/gvt/gvt.c | 9 ++++ drivers/gpu/drm/i915/gvt/gvt.h | 3 ++ drivers/gpu/drm/i915/gvt/handlers.c | 44 ++++++++++++++++++-- drivers/gpu/drm/i915/gvt/mmio.h | 4 ++ drivers/gpu/drm/i915/intel_gvt.c | 15 +++++++ drivers/gpu/drm/i915/intel_gvt.h | 5 +++ 8 files changed, 145 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index a3a4305eda01b..897c007ea96a1 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -636,9 +636,18 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + unsigned long offset = index; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); + if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { + offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT); + mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64; + } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { + offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT); + mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64; + } + pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); } @@ -1944,6 +1953,21 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) return ERR_PTR(-ENOMEM); } + mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); + if (!mm->ggtt_mm.host_ggtt_aperture) { + vfree(mm->ggtt_mm.virtual_ggtt); + vgpu_free_mm(mm); + return ERR_PTR(-ENOMEM); + } + + mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); + if (!mm->ggtt_mm.host_ggtt_hidden) { + vfree(mm->ggtt_mm.host_ggtt_aperture); + vfree(mm->ggtt_mm.virtual_ggtt); + vgpu_free_mm(mm); + return ERR_PTR(-ENOMEM); + } + return mm; } @@ -1971,6 +1995,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); + vfree(mm->ggtt_mm.host_ggtt_aperture); + vfree(mm->ggtt_mm.host_ggtt_hidden); } vgpu_free_mm(mm); @@ -2852,3 +2878,41 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) intel_vgpu_destroy_all_ppgtt_mm(vgpu); intel_vgpu_reset_ggtt(vgpu, true); } + +/** + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries + * @gvt: intel gvt device + * + * This function is called at driver resume stage to restore + * GGTT entries of every vGPU. 
+ * + */ +void intel_gvt_restore_ggtt(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + struct intel_vgpu_mm *mm; + int id; + gen8_pte_t pte; + u32 idx, num_low, num_hi, offset; + + /* Restore dirty host ggtt for all vGPUs */ + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mm = vgpu->gtt.ggtt_mm; + + num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; + offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; + for (idx = 0; idx < num_low; idx++) { + pte = mm->ggtt_mm.host_ggtt_aperture[idx]; + if (pte & _PAGE_PRESENT) + write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); + } + + num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; + offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; + for (idx = 0; idx < num_hi; idx++) { + pte = mm->ggtt_mm.host_ggtt_hidden[idx]; + if (pte & _PAGE_PRESENT) + write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); + } + } +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 52d0d88abd86a..b0e173f2d9904 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -164,6 +164,9 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; + /* Save/restore for PM */ + u64 *host_ggtt_aperture; + u64 *host_ggtt_hidden; struct list_head partial_pte_list; } ggtt_mm; }; @@ -280,5 +283,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes); void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu); +void intel_gvt_restore_ggtt(struct intel_gvt *gvt); #endif /* _GVT_GTT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index c7c5612378832..495b6afd65713 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -405,6 +405,15 @@ int intel_gvt_init_device(struct drm_i915_private *i915) return ret; } +int +intel_gvt_pm_resume(struct intel_gvt *gvt) +{ + intel_gvt_restore_fence(gvt); + intel_gvt_restore_mmio(gvt); + intel_gvt_restore_ggtt(gvt); + return 0; +} + int intel_gvt_register_hypervisor(struct intel_gvt_mpt *m) { diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index a81cf0f01e78e..b3d6355dd797d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -255,6 +255,8 @@ struct intel_gvt_mmio { #define F_CMD_ACCESS (1 << 3) /* This reg has been accessed by a VM */ #define F_ACCESSED (1 << 4) +/* This reg requires save & restore during host PM suspend/resume */ +#define F_PM_SAVE (1 << 5) /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) /* This reg is in GVT's mmio save-restor list and in hardware @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); +int intel_gvt_pm_resume(struct intel_gvt *gvt); #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index c7cf15fe9ef60..62b49c72a7d09 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3091,9 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS, - NULL, gen9_trtte_write); - MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); + MMIO_DFH(TRTTE, D_SKL_PLUS, 
F_CMD_ACCESS | F_PM_SAVE, + NULL, gen9_trtte_write); + MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, + NULL, gen9_trtt_chicken_write); MMIO_D(_MMIO(0x46430), D_SKL_PLUS); @@ -3630,3 +3631,40 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) : intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes); } + +void intel_gvt_restore_fence(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int i, id; + + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mmio_hw_access_pre(gvt->gt); + for (i = 0; i < vgpu_fence_sz(vgpu); i++) + intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i))); + mmio_hw_access_post(gvt->gt); + } +} + +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt, + u32 offset, void *data) +{ + struct intel_vgpu *vgpu = data; + struct drm_i915_private *dev_priv = gvt->gt->i915; + + if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + + return 0; +} + +void intel_gvt_restore_mmio(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int id; + + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mmio_hw_access_pre(gvt->gt); + intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); + mmio_hw_access_post(gvt->gt); + } +} diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index cc4812648bf4a..9e862dc73579b 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -104,4 +104,8 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); + +void intel_gvt_restore_fence(struct intel_gvt *gvt); +void intel_gvt_restore_mmio(struct intel_gvt *gvt); + #endif diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 99fe8aef1c67f..4e70c1a9ef2ed 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -24,6 +24,7 @@ #include "i915_drv.h" #include "i915_vgpu.h" #include "intel_gvt.h" +#include "gvt/gvt.h" /** * DOC: Intel GVT-g host support @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) intel_gvt_clean_device(dev_priv); } + +/** + * intel_gvt_resume - GVT resume routine wapper + * + * @dev_priv: drm i915 private * + * + * This function is called at the i915 driver resume stage to restore required + * HW status for GVT so that vGPU can continue running after resumed. 
+ */
+void intel_gvt_resume(struct drm_i915_private *dev_priv)
+{
+	if (intel_gvt_active(dev_priv))
+		intel_gvt_pm_resume(dev_priv->gvt);
+}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 502fad8a8652c..d7d3fb6186fdd 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv);
 void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
 int intel_gvt_init_host(void);
 void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
+void intel_gvt_resume(struct drm_i915_private *dev_priv);
 #else
 static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
 {
@@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
 static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
 {
 }
+
+static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
+{
+}
 #endif
 
 #endif /* _INTEL_GVT_H_ */

From 385fc38c1f97387c47113254d8de2cb0dbf133bd Mon Sep 17 00:00:00 2001
From: Colin Xu
Date: Tue, 27 Oct 2020 12:54:06 +0800
Subject: [PATCH 3/6] drm/i915: Add GVT resume routine to i915

This patch adds the GVT resume wrapper into i915_drm_resume(). GVT relies
on i915, so resume GVT last.

V2:
- Call directly into the gvt suspend/resume wrapper in
  intel_gvt.h/intel_gvt.c. The wrapper and implementation will check and
  call the gvt routine. (zhenyu)

V3: Refresh.
V4: Rebase.
V5: Fail intel_gvt_suspend() if saving the GGTT fails.
V6: Save the host entry to the per-vGPU gtt.ggtt_mm on each host_entry
    update, so only the resume routine is needed.
V7: Refresh.

Reviewed-by: Zhenyu Wang
Signed-off-by: Hang Yuan
Signed-off-by: Colin Xu
Signed-off-by: Zhenyu Wang
Link: http://patchwork.freedesktop.org/patch/msgid/20201027045406.159566-1-colin.xu@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e1e056ffff29d..751e258d21038 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1271,6 +1271,8 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	intel_power_domains_enable(dev_priv);
 
+	intel_gvt_resume(dev_priv);
+
 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
 	return 0;

From a5a8ef937cfa79167f4b2a5602092b8d14fd6b9a Mon Sep 17 00:00:00 2001
From: Colin Xu
Date: Mon, 9 Nov 2020 15:39:22 +0800
Subject: [PATCH 4/6] drm/i915/gvt: Fix virtual display setup for BXT/APL

Program the display-related vregs to proper values at initialization, and
set up the virtual monitor and hotplug.

The vGPU virtual display vregs inherit their values from the pregs. The
virtual DP monitor is always set up on PORT_B for BXT/APL. However, the
host may have a monitor connected on a different port, or no monitor at
all. Without properly set up PIPE/DDI/PLL related vregs, the guest driver
may not set up the virtual display as expected, and the guest desktop may
not be created.

Since only one virtual display is supported, enable PIPE_A only, and
enable the transcoder/DDI/PLL based on which port is set up for BXT/APL.

V2: Revise commit message.
V3: set_edid should be on PORT_B for BXT. Inject hpd event for BXT.
V4: Temporarily disable vfio edid on BXT/APL until the issue is fixed.
V5: Rebase to use the new HPD define GEN8_DE_PORT_HOTPLUG for BXT.
    Put the vfio edid disabling on BXT/APL into a separate patch.
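Note: the programming pattern used in this patch (first clear the
PIPE/DDI/PLL enable bits in every virtual register, then set them again
only for the port that actually carries the virtual monitor) can be
sketched in stand-alone C as below. The register array, bit masks and port
list are illustrative only and are not the real i915 register layout.

#include <stdint.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, NUM_PORTS };

#define DDI_BUF_CTL_ENABLE (1u << 31) /* illustrative masks, not the HW bits */
#define DDI_BUF_IS_IDLE    (1u << 7)

static uint32_t vreg_ddi_buf_ctl[NUM_PORTS]; /* models the per-port vregs */

static void emulate_monitor_status_change(enum port monitor_port)
{
	/* Clear PIPE/DDI state on every port before setting the new state. */
	for (int p = PORT_A; p < NUM_PORTS; p++) {
		vreg_ddi_buf_ctl[p] &= ~DDI_BUF_CTL_ENABLE;
		vreg_ddi_buf_ctl[p] |= DDI_BUF_IS_IDLE;
	}

	/* Enable only the port that carries the virtual monitor. */
	vreg_ddi_buf_ctl[monitor_port] |= DDI_BUF_CTL_ENABLE;
	vreg_ddi_buf_ctl[monitor_port] &= ~DDI_BUF_IS_IDLE;
}

int main(void)
{
	emulate_monitor_status_change(PORT_B);
	for (int p = PORT_A; p < NUM_PORTS; p++)
		printf("port %d: %#x\n", p, (unsigned int)vreg_ddi_buf_ctl[p]);
	return 0;
}

Clearing everything first keeps the vregs consistent regardless of what the
host happened to have enabled before the vGPU was created.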
Acked-by: Zhenyu Wang Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201109073922.757759-1-colin.xu@intel.com --- drivers/gpu/drm/i915/gvt/display.c | 179 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/mmio.c | 5 + 2 files changed, 184 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 5b5c71a0b4af5..5c1fcac260d3b 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -173,22 +173,162 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) int pipe; if (IS_BROXTON(dev_priv)) { + enum transcoder trans; + enum port port; + + /* Clear PIPE, DDI, PHY, HPD before setting new */ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); + for_each_pipe(dev_priv, pipe) { + vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= + ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; + } + + for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) { + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE); + } + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + + for (port = PORT_A; port <= PORT_C; port++) { + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |= + (BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &= + ~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &= + ~(DDI_INIT_DISPLAY_DETECTED | + DDI_BUF_CTL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE; + } + + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30); + + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED; + + /* + * Only 1 PIPE enabled in current vGPU display and PIPE_A is + * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, + * TRANSCODER_A can be enabled. PORT_x depends on the input of + * setup_virtual_dp_monitor. + */ + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE; + + /* + * Golden M/N are calculated based on: + * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), + * DP link clk 1620 MHz and non-constant_n. + * TODO: calculate DP link symbol clk and stream clk m/n. 
+ */ + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; + + /* Enable per-DDI/PORT vreg */ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= + (DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= + DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_B << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= + DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_B << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); } @@ -520,6 +660,45 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTD_HOTPLUG_STATUS_MASK; intel_vgpu_trigger_virtual_event(vgpu, 
DP_D_HOTPLUG); + } else if (IS_BROXTON(i915)) { + if (connected) { + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= + SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= + SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); + } + } else { + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= + ~SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= + ~SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); + } + } + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTB_HOTPLUG_STATUS_MASK; + intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG); } } diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index b6811f6a230df..24210b1eaec58 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -280,6 +280,11 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK; + vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= + SKL_FUSE_DOWNLOAD_STATUS | + SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG2); } } else { #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) From 64e65f443a43c35967242fd13b494c251bc5d7ee Mon Sep 17 00:00:00 2001 From: Deepak R Varma Date: Wed, 4 Nov 2020 17:45:32 +0530 Subject: [PATCH 5/6] drm/i915/gvt: replace idr_init() by idr_init_base() idr_init() uses base 0 which is an invalid identifier. The new function idr_init_base allows IDR to set the ID lookup from base 1. This avoids all lookups that otherwise starts from 0 since 0 is always unused. 
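Note: the effect of a non-zero base can be illustrated with a stand-alone
toy allocator like the one below. This is only a sketch of the idea; the
real IDR is a radix tree, not a bitmap, and the names here are made up.

#include <stdio.h>

#define MAX_IDS 16

struct toy_idr {
	int base;            /* lowest ID handed out, like idr_init_base() */
	int used[MAX_IDS];
};

static void toy_idr_init_base(struct toy_idr *idr, int base)
{
	idr->base = base;
	for (int i = 0; i < MAX_IDS; i++)
		idr->used[i] = 0;
}

/* The scan starts at 'base', so slot 0 is never probed when base is 1. */
static int toy_idr_alloc(struct toy_idr *idr)
{
	for (int id = idr->base; id < MAX_IDS; id++) {
		if (!idr->used[id]) {
			idr->used[id] = 1;
			return id;
		}
	}
	return -1;
}

int main(void)
{
	struct toy_idr idr;

	toy_idr_init_base(&idr, 1);
	printf("first id: %d\n", toy_idr_alloc(&idr)); /* prints 1 */
	return 0;
}

With base 1 the allocator never has to skip over an ID that can never be
used, which mirrors why idr_init_base() helps callers that treat 0 as
invalid.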
References: commit 6ce711f27500 ("idr: Make 1-based IDRs more efficient")
Reviewed-by: Zhenyu Wang
Signed-off-by: Deepak R Varma
Signed-off-by: Zhenyu Wang
Link: http://patchwork.freedesktop.org/patch/msgid/20201104121532.GA48202@localhost
---
 drivers/gpu/drm/i915/gvt/gvt.c  | 2 +-
 drivers/gpu/drm/i915/gvt/vgpu.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 495b6afd65713..0262a6fa3a6c6 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -312,7 +312,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
 
 	gvt_dbg_core("init gvt device\n");
 
-	idr_init(&gvt->vgpu_idr);
+	idr_init_base(&gvt->vgpu_idr, 1);
 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
 	mutex_init(&gvt->sched_lock);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 8fa9b31a24840..85bfcc15385fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -392,7 +392,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	mutex_init(&vgpu->dmabuf_lock);
 	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
-	idr_init(&vgpu->object_idr);
+	idr_init_base(&vgpu->object_idr, 1);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
 	vgpu->d3_entered = false;

From 9a3a238b3de97b4210c6de66aa88b2d7021ac086 Mon Sep 17 00:00:00 2001
From: Julian Stecklina
Date: Wed, 11 Nov 2020 18:28:11 +0100
Subject: [PATCH 6/6] drm/i915/gvt: treat intel_gvt_mpt as const in gvt code

The current interface of intel_gvt_register_hypervisor() expects a
non-const pointer to struct intel_gvt_mpt, even though the mediator never
modifies (or should modify) the contents of this struct. Change the
function signature and the relevant struct members to const to properly
express the API's intent and allow instances of intel_gvt_mpt to be
allocated as const.

While I was here, I also made KVM's instance of this struct const to
reduce the number of writable function pointers in the kernel.
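Note: the const-correctness change can be illustrated with a small
stand-alone example. The names below are generic and are not the GVT API;
the point is only that a const ops table can live in read-only memory once
every consumer takes a const pointer.

#include <stdio.h>

struct ops {
	int (*host_init)(void);
	void (*host_exit)(void);
};

/* Taking a const pointer documents that the callee only reads the table. */
static const struct ops *registered_ops;

static int register_ops(const struct ops *o)
{
	registered_ops = o;
	return registered_ops->host_init();
}

static int my_init(void) { printf("init\n"); return 0; }
static void my_exit(void) { printf("exit\n"); }

/* The instance itself can now be placed in .rodata. */
static const struct ops my_ops = {
	.host_init = my_init,
	.host_exit = my_exit,
};

int main(void)
{
	register_ops(&my_ops);
	registered_ops->host_exit();
	return 0;
}

Keeping function-pointer tables const is a common kernel hardening pattern,
since writable function pointers are attractive targets for memory
corruption.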
Cc: Zhenyu Wang Cc: intel-gvt-dev@lists.freedesktop.org Reviewed-by: Zhenyu Wang Signed-off-by: Julian Stecklina Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20201111172811.558443-1-julian.stecklina@cyberus-technology.de --- drivers/gpu/drm/i915/gvt/gvt.c | 2 +- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- drivers/gpu/drm/i915/gvt/mpt.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 0262a6fa3a6c6..d1d8ee4a5f16a 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -415,7 +415,7 @@ intel_gvt_pm_resume(struct intel_gvt *gvt) } int -intel_gvt_register_hypervisor(struct intel_gvt_mpt *m) +intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) { int ret; void *gvt; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index b3d6355dd797d..cf3578e3f4dda 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -56,7 +56,7 @@ struct intel_gvt_host { struct device *dev; bool initialized; int hypervisor_type; - struct intel_gvt_mpt *mpt; + const struct intel_gvt_mpt *mpt; }; extern struct intel_gvt_host intel_gvt_host; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index ad8a9df49f295..d830b6c652840 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -2099,7 +2099,7 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) return ret; } -static struct intel_gvt_mpt kvmgt_mpt = { +static const struct intel_gvt_mpt kvmgt_mpt = { .type = INTEL_GVT_HYPERVISOR_KVM, .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 9ad224df9c68b..6f92cde71971e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -392,7 +392,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); } -int intel_gvt_register_hypervisor(struct intel_gvt_mpt *); +int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *); void intel_gvt_unregister_hypervisor(void); #endif /* _GVT_MPT_H_ */