amdgpu, omap and snd regression fix

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJaz/QFAAoJEAx081l5xIa+rlAQAImeYZMp5e8V1NjPORwUmp+N
 VxHNErPc01vjTYqq2CNL+koJ1Rlq+cLbJZzHy3MT32U4jWVGykehMNmLah5fK55+
 nhgV5Ho/0OXqjWeI+trGpmg1BiEmoCP2dJOyugP9J9lo55k9mvzj5RoSDGAuAaiH
 jSrT2d4vnhvVFqNywBoqT5nGPgv0lqyp4o79jNykj9b8bGIU85K+KEhFzK7OXmtT
 suHJE7tfSriC5nsez/TNpQf2tvfbQM6cHOdmd6pk6NllxJWLq0YSUWIKJr7DolPh
 9s7FMHd1uwyEVMfoXVU8+5M0KirW8VwgUaWC6AwC75BMONH74OAlXFv9YnFiysSf
 mRsjtI1iJMn5Ri4I3VEghyc9/34ejaSxq9mfe5rc5AUpRt7QhPV16I/sm0E82/88
 bVymPJUpPnWERfs9p4VngNsV7hDCuXgrQWUCED0GRtEVcElKHIip/9RR6G4tB8HH
 qS2QZwaFZLpAncXldqirx8MO8xqZi2amab9O+GDpTMkyqT2+S66bnAw4LCmRbqTj
 Jw9JTY4Hb3P1pgYKCjJyLZzJXYw3+DvLMnwWdMe8g0Ms9sr2sqHuNBg4IeJt13qy
 DIyfks43eJtT8H5YEvyTFtBYNck26u3h2VBpMcXacadeNM5HeWA1lBwCVURC7Cwu
 MkeLcYcTVlQFjehkV0mL
 =WE2n
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.17-rc1' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "One omap, and one alsa pm fix (we merged the breaking patch via drm
  tree).

  Otherwise it's two bunches of amdgpu fixes, removing an unneeded file,
  some DC fixes, HDMI audio regression fix, and some vega12 fixes"

* tag 'drm-fixes-for-v4.17-rc1' of git://people.freedesktop.org/~airlied/linux: (27 commits)
  Revert "drm/amd/display: disable CRTCs with NULL FB on their primary plane (V2)"
  Revert "drm/amd/display: fix dereferencing possible ERR_PTR()"
  drm/amd/display: Fix regamma not affecting full-intensity color values
  drm/amd/display: Fix FBC text console corruption
  drm/amd/display: Only register backlight device if embedded panel connected
  drm/amd/display: fix brightness level after resume from suspend
  drm/amd/display: HDMI has no sound after Panel power off/on
  drm/amdgpu: add MP1 and THM hw ip base reg offset
  drm/amdgpu: fix null pointer panic with direct fw loading on gpu reset
  drm/radeon: add PX quirk for Asus K73TK
  drm/omap: fix crash if there's no video PLL
  drm/amdgpu: Fix memory leaks at amdgpu_init() error path
  drm/amdgpu: Fix PCIe lane width calculation
  drm/radeon: Fix PCIe lane width calculation
  drm/amdgpu/si: implement get/set pcie_lanes asic callback
  drm/amdgpu: Add support for SRBM selection v3
  Revert "drm/amdgpu: Don't change preferred domian when fallback GTT v5"
  drm/amd/powerply: fix power reading on Fiji
  drm/amd/powerplay: Enable ACG SS feature
  drm/amdgpu/sdma: fix mask in emit_pipeline_sync
  ...
Linus Torvalds 2018-04-12 20:56:10 -07:00
commit 16e205cf42
40 changed files with 430 additions and 1638 deletions


@ -890,6 +890,7 @@ struct amdgpu_gfx_funcs {
void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
};
struct amdgpu_ngg_buf {
@ -1378,6 +1379,7 @@ enum amd_hw_ip_block_type {
ATHUB_HWIP,
NBIO_HWIP,
MP0_HWIP,
MP1_HWIP,
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
VCE_HWIP,
@ -1387,6 +1389,7 @@ enum amd_hw_ip_block_type {
SMUIO_HWIP,
PWR_HWIP,
NBIF_HWIP,
THM_HWIP,
MAX_HWIP
};
@ -1812,6 +1815,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
/* Common functions */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,


@ -64,16 +64,21 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
char __user *buf, size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank;
unsigned instance_bank, sh_bank, se_bank;
bool pm_pg_lock, use_bank, use_ring;
unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;
if (size & 0x3 || *pos & 0x3)
pm_pg_lock = use_bank = use_ring = false;
instance_bank = sh_bank = se_bank = me = pipe = queue = 0;
if (size & 0x3 || *pos & 0x3 ||
((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
return -EINVAL;
/* are we reading registers for which a PG lock is necessary? */
@ -91,8 +96,15 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else if (*pos & (1ULL << 61)) {
me = (*pos & GENMASK_ULL(33, 24)) >> 24;
pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
use_ring = 1;
} else {
use_bank = 0;
use_bank = use_ring = 0;
}
*pos &= (1UL << 22) - 1;
@ -104,6 +116,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
} else if (use_ring) {
mutex_lock(&adev->srbm_mutex);
amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
}
if (pm_pg_lock)
@ -115,8 +130,14 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (*pos > adev->rmmio_size)
goto end;
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (read) {
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
WREG32(*pos >> 2, value);
}
if (r) {
result = r;
goto end;
@ -132,6 +153,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (use_bank) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
} else if (use_ring) {
amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
if (pm_pg_lock)
@ -140,78 +164,17 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
return result;
}
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank;
unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
/* are we reading registers for which a PG lock is necessary? */
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
if (sh_bank == 0x3FF)
sh_bank = 0xFFFFFFFF;
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
}
*pos &= (1UL << 22) - 1;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
}
if (pm_pg_lock)
mutex_lock(&adev->pm.mutex);
while (size) {
uint32_t value;
if (*pos > adev->rmmio_size)
return result;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
WREG32(*pos >> 2, value);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
if (use_bank) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
return result;
return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
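For context, here is a minimal userspace sketch of the offset encoding that the unified amdgpu_debugfs_process_reg_op() helper now parses. This is not part of the patch: the debugfs path and the chosen register/ring values are illustrative assumptions; the bit layout (bit 62 for SE/SH/instance banking, the new bit 61 for me/pipe/queue, the GENMASK_ULL fields at bits 24-33/34-43/44-53, bit 23 for the PM lock, and the low 22 bits for the register byte offset) is taken from the hunk above.

/* Sketch only: read one register through the amdgpu register debugfs file
 * using the new bit-61 me/pipe/queue selector.  Setting both bit 61 and
 * bit 62 is rejected with -EINVAL by the kernel side shown above.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t reg = 0x1234;                 /* register byte offset, low 22 bits, 4-byte aligned */
	uint64_t me = 1, pipe = 0, queue = 2;  /* hypothetical ring selection */
	uint64_t pos = reg
		| (me    << 24)                /* GENMASK_ULL(33, 24) */
		| (pipe  << 34)                /* GENMASK_ULL(43, 34) */
		| (queue << 44)                /* GENMASK_ULL(53, 44) */
		| (1ULL  << 61);               /* select me/pipe/queue instead of SE/SH banking */
	uint32_t val;
	int fd;

	fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);  /* path is an assumption */
	if (fd < 0)
		return 1;
	if (pread(fd, &val, sizeof(val), (off_t)pos) == (ssize_t)sizeof(val))
		printf("reg 0x%llx = 0x%08x\n", (unsigned long long)reg, val);
	close(fd);
	return 0;
}

Writes use the same encoding now that amdgpu_debugfs_regs_write() is a thin wrapper around the shared helper.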


@ -922,6 +922,11 @@ static int __init amdgpu_init(void)
{
int r;
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
}
r = amdgpu_sync_init();
if (r)
goto error_sync;
@ -930,10 +935,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
}
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;


@ -410,6 +410,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@ -433,11 +434,16 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
/* for non-sriov case, no timeout enforce on compute ring */
if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
&& !amdgpu_sriov_vf(ring->adev))
timeout = MAX_SCHEDULE_TIMEOUT;
else
timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
(ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ?
MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(amdgpu_lockup_timeout),
ring->name);
timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);


@ -56,11 +56,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
alignment = PAGE_SIZE;
}
retry:
r = amdgpu_bo_create(adev, size, alignment, initial_domain,
flags, type, resv, &bo);
if (r) {
DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
goto retry;
}
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
}
DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
}
*obj = &bo->gem_base;


@ -356,7 +356,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
struct amdgpu_bo *bo;
unsigned long page_align;
size_t acc_size;
u32 domains;
int r;
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
@ -418,23 +417,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
#endif
bo->tbo.bdev = &adev->mman.bdev;
domains = bo->preferred_domains;
retry:
amdgpu_ttm_placement_from_domain(bo, domains);
amdgpu_ttm_placement_from_domain(bo, domain);
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, &ctx, acc_size,
NULL, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r && r != -ERESTARTSYS)) {
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
goto retry;
} else if (domains != bo->preferred_domains) {
domains = bo->allowed_domains;
goto retry;
}
}
if (unlikely(r))
if (unlikely(r != 0))
return r;
if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&


@ -505,6 +505,9 @@ static int psp_resume(void *handle)
int psp_gpu_reset(struct amdgpu_device *adev)
{
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
return psp_mode1_reset(&adev->psp);
}


@ -859,7 +859,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
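A note on this repeated one-character fix (my arithmetic, not from the patch): the old constant masks the POLL_REGMEM comparison to 28 bits, so fence sequence numbers that differ only in the top four bits of the 32-bit reference value are not compared correctly. The same correction appears below for sdma_v2_4, sdma_v3_0 and sdma_v4_0.

    0xfffffff  = 2^28 - 1   (old, 28 bits)
    0xffffffff = 2^32 - 1   (new, all 32 bits)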


@ -3061,11 +3061,18 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q)
{
DRM_INFO("Not implemented\n");
}
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
.read_wave_data = &gfx_v6_0_read_wave_data,
.read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};
static int gfx_v6_0_early_init(void *handle)


@ -4270,11 +4270,18 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q)
{
cik_srbm_select(adev, me, pipe, q, 0);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
.read_wave_data = &gfx_v7_0_read_wave_data,
.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {


@ -3475,6 +3475,12 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
WREG32(mmGRBM_GFX_INDEX, data);
}
static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q)
{
vi_srbm_select(adev, me, pipe, q, 0);
}
static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@ -5442,6 +5448,7 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.select_se_sh = &gfx_v8_0_select_se_sh,
.read_wave_data = &gfx_v8_0_read_wave_data,
.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};
static int gfx_v8_0_early_init(void *handle)


@ -998,12 +998,19 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q)
{
soc15_grbm_select(adev, me, pipe, q, 0);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@ -2757,6 +2764,45 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
return 0;
}
static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int j;
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
if (j == AMDGPU_MAX_USEC_TIMEOUT) {
DRM_DEBUG("KIQ dequeue request failed.\n");
/* Manual disable if dequeue request times out */
WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
}
WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
0);
}
WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
return 0;
}
static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@ -3010,7 +3056,6 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
return r;
}
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -3033,6 +3078,20 @@ static int gfx_v9_0_hw_fini(void *handle)
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
/* Use deinitialize sequence from CAIL when unbinding device from driver,
* otherwise KIQ is hanging when binding back
*/
if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
adev->gfx.kiq.ring.pipe,
adev->gfx.kiq.ring.queue, 0);
gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);


@ -837,7 +837,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}


@ -1105,7 +1105,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}


@ -1121,7 +1121,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}


@ -1252,6 +1252,71 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
}
}
static int si_get_pcie_lanes(struct amdgpu_device *adev)
{
u32 link_width_cntl;
if (adev->flags & AMD_IS_APU)
return 0;
link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
case LC_LINK_WIDTH_X1:
return 1;
case LC_LINK_WIDTH_X2:
return 2;
case LC_LINK_WIDTH_X4:
return 4;
case LC_LINK_WIDTH_X8:
return 8;
case LC_LINK_WIDTH_X0:
case LC_LINK_WIDTH_X16:
default:
return 16;
}
}
static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
{
u32 link_width_cntl, mask;
if (adev->flags & AMD_IS_APU)
return;
switch (lanes) {
case 0:
mask = LC_LINK_WIDTH_X0;
break;
case 1:
mask = LC_LINK_WIDTH_X1;
break;
case 2:
mask = LC_LINK_WIDTH_X2;
break;
case 4:
mask = LC_LINK_WIDTH_X4;
break;
case 8:
mask = LC_LINK_WIDTH_X8;
break;
case 16:
mask = LC_LINK_WIDTH_X16;
break;
default:
DRM_ERROR("invalid pcie lane request: %d\n", lanes);
return;
}
link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_LINK_WIDTH_MASK;
link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
link_width_cntl |= (LC_RECONFIG_NOW |
LC_RECONFIG_ARC_MISSING_ESCAPE);
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@ -1262,6 +1327,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
.get_pcie_lanes = &si_get_pcie_lanes,
.set_pcie_lanes = &si_set_pcie_lanes,
.get_config_memsize = &si_get_config_memsize,
.flush_hdp = &si_flush_hdp,
.invalidate_hdp = &si_invalidate_hdp,


@ -6372,9 +6372,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
{
u32 lane_width;
u32 new_lane_width =
(amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
(amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
amdgpu_set_pcie_lanes(adev, new_lane_width);
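A quick decode check (my inference from the added +1, not spelled out in the patch): the ATOM_PPLIB link-width field evidently stores the lane count minus one, so the corrected decode yields exactly the counts that the new si_set_pcie_lanes() callback above accepts:

    stored 0 -> 1 lane,  1 -> 2,  3 -> 4,  7 -> 8,  15 -> 16

Without the +1, stored values such as 3, 7 or 15 would be handed to amdgpu_set_pcie_lanes() unchanged and land in the "invalid pcie lane request" error path. The identical correction is applied to radeon's si_dpm.c further down.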


@ -38,6 +38,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
@ -49,7 +50,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
}
return 0;
}


@ -1403,6 +1403,28 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
return ret;
}
static void register_backlight_device(struct amdgpu_display_manager *dm,
struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
link->type != dc_connection_none) {
/* Event if registration failed, we should continue with
* DM initialization because not having a backlight control
* is better then a black screen.
*/
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev)
dm->backlight_link = link;
}
#endif
}
/* In this architecture, the association
* connector -> encoder -> crtc
* id not really requried. The crtc and connector will hold the
@ -1456,6 +1478,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
DRM_ERROR(
@ -1482,9 +1505,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
DETECT_REASON_BOOT))
link = dc_get_link_at_index(dm->dc, i);
if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
}
}
/* Software is initialized. Now we can register interrupt handlers. */
@ -2685,7 +2713,8 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
link->type != dc_connection_none) {
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev) {
@ -3561,6 +3590,7 @@ create_i2c(struct ddc_service *ddc_service,
return i2c;
}
/* Note: this function assumes that dc_link_detect() was called for the
* dc_link which will be represented by this aconnector.
*/
@ -3630,28 +3660,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* NOTE: this currently will create backlight device even if a panel
* is not connected to the eDP/LVDS connector.
*
* This is less than ideal but we don't have sink information at this
* stage since detection happens after. We can't do detection earlier
* since MST detection needs connectors to be created first.
*/
if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
/* Event if registration failed, we should continue with
* DM initialization because not having a backlight control
* is better then a black screen.
*/
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev)
dm->backlight_link = link;
}
#endif
out_free:
if (res) {
kfree(i2c);
@ -4840,33 +4848,6 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return -EDEADLK;
crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
if (crtc->primary == plane && crtc_state->active) {
if (!plane_state->fb)
return -EINVAL;
}
}
return 0;
}
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@ -4890,10 +4871,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
ret = dm_atomic_check_plane_state_fb(state, crtc);
if (ret)
goto fail;
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed)
continue;


@ -1997,6 +1997,19 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
return true;
}
bool dc_link_set_abm_disable(const struct dc_link *link)
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
return false;
abm->funcs->set_abm_immediate_disable(abm);
return true;
}
bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
{
struct dc *core_dc = link->ctx->dc;


@ -132,6 +132,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);


@ -735,6 +735,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
if (info_frame->avi.valid) {
const uint32_t *content =
(const uint32_t *) &info_frame->avi.sb[0];
/*we need turn on clock before programming AFMT block*/
REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
REG_WRITE(AFMT_AVI_INFO0, content[0]);


@ -102,6 +102,43 @@ static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
return 256 * ((pixels + 255) / 256);
}
static void reset_lb_on_vblank(struct dc_context *ctx)
{
uint32_t value, frame_count;
uint32_t retry = 0;
uint32_t status_pos =
dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
/* Only if CRTC is enabled and counter is moving we wait for one frame. */
if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
/* Resetting LB on VBlank */
value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
for (retry = 100; retry > 0; retry--) {
if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
break;
msleep(1);
}
if (!retry)
dm_error("Frame count did not increase for 100ms.\n");
/* Resetting LB on VBlank */
value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
}
}
static void wait_for_fbc_state_changed(
struct dce110_compressor *cp110,
bool enabled)
@ -232,19 +269,23 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
{
struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
if (compressor->options.bits.FBC_SUPPORT &&
dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
uint32_t reg_data;
/* Turn off compression */
reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
if (compressor->options.bits.FBC_SUPPORT) {
if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
uint32_t reg_data;
/* Turn off compression */
reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
/* Reset enum controller_id to undefined */
compressor->attached_inst = 0;
compressor->is_enabled = false;
/* Reset enum controller_id to undefined */
compressor->attached_inst = 0;
compressor->is_enabled = false;
wait_for_fbc_state_changed(cp110, false);
wait_for_fbc_state_changed(cp110, false);
}
/* Sync line buffer - dce100/110 only*/
reset_lb_on_vblank(compressor->ctx);
}
}


@ -453,10 +453,13 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
} else {
/* 10 segments
* segment is from 2^-10 to 2^0
* segment is from 2^-10 to 2^1
* We include an extra segment for range [2^0, 2^1). This is to
* ensure that colors with normalized values of 1 don't miss the
* LUT.
*/
region_start = -10;
region_end = 0;
region_end = 1;
seg_distr[0] = 4;
seg_distr[1] = 4;
@ -468,7 +471,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
seg_distr[7] = 4;
seg_distr[8] = 4;
seg_distr[9] = 4;
seg_distr[10] = -1;
seg_distr[10] = 0;
seg_distr[11] = -1;
seg_distr[12] = -1;
seg_distr[13] = -1;
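A quick check of the new bounds (my arithmetic, not from the patch): with region_start = -10 and region_end = 0 the regamma curve was sampled on [2^-10, 2^0), so a fully saturated input of exactly 1.0 fell just outside the last segment; raising region_end to 1 extends the range to [2^-10, 2^1), and the new seg_distr[10] = 0 entry appears to give the extra [2^0, 2^1) segment a single point, which is enough for full-intensity values to land inside the LUT.

    old: [2^-10, 2^0) = [1/1024, 1.0)   -- 1.0 excluded
    new: [2^-10, 2^1) = [1/1024, 2.0)   -- 1.0 covered by the added segment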
@ -1016,8 +1019,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, false);
dc_link_set_abm_disable(link);
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);


@ -1264,9 +1264,9 @@ struct atom_smc_dpm_info_v4_1
uint8_t ledpin2;
uint8_t padding8_4;
uint8_t gfxclkspreadenabled;
uint8_t gfxclkspreadpercent;
uint16_t gfxclkspreadfreq;
uint8_t pllgfxclkspreadenabled;
uint8_t pllgfxclkspreadpercent;
uint16_t pllgfxclkspreadfreq;
uint8_t uclkspreadenabled;
uint8_t uclkspreadpercent;
@ -1276,7 +1276,11 @@ struct atom_smc_dpm_info_v4_1
uint8_t socclkspreadpercent;
uint16_t socclkspreadfreq;
uint32_t boardreserved[3];
uint8_t acggfxclkspreadenabled;
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
uint32_t boardreserved[10];
};


@ -32,7 +32,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
vega10_thermal.o smu10_hwmgr.o pp_psm.o\
vega12_processpptables.o vega12_hwmgr.o \
vega12_powertune.o vega12_thermal.o \
vega12_thermal.o \
pp_overdriver.o smu_helper.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))


@ -616,9 +616,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->ledpin1 = info->ledpin1;
param->ledpin2 = info->ledpin2;
param->gfxclkspreadenabled = info->gfxclkspreadenabled;
param->gfxclkspreadpercent = info->gfxclkspreadpercent;
param->gfxclkspreadfreq = info->gfxclkspreadfreq;
param->pllgfxclkspreadenabled = info->pllgfxclkspreadenabled;
param->pllgfxclkspreadpercent = info->pllgfxclkspreadpercent;
param->pllgfxclkspreadfreq = info->pllgfxclkspreadfreq;
param->uclkspreadenabled = info->uclkspreadenabled;
param->uclkspreadpercent = info->uclkspreadpercent;
@ -628,5 +628,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->socclkspreadpercent = info->socclkspreadpercent;
param->socclkspreadfreq = info->socclkspreadfreq;
param->acggfxclkspreadenabled = info->acggfxclkspreadenabled;
param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
return 0;
}


@ -192,9 +192,9 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t ledpin1;
uint8_t ledpin2;
uint8_t gfxclkspreadenabled;
uint8_t gfxclkspreadpercent;
uint16_t gfxclkspreadfreq;
uint8_t pllgfxclkspreadenabled;
uint8_t pllgfxclkspreadpercent;
uint16_t pllgfxclkspreadfreq;
uint8_t uclkspreadenabled;
uint8_t uclkspreadpercent;
@ -203,6 +203,10 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t socclkspreadenabled;
uint8_t socclkspreadpercent;
uint16_t socclkspreadfreq;
uint8_t acggfxclkspreadenabled;
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,


@ -3374,7 +3374,8 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
"Failed to start pm status log!",
return -1);
msleep_interruptible(20);
/* Sampling period from 50ms to 4sec */
msleep_interruptible(200);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogSample),


@ -319,13 +319,13 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
&size, &frev, &crev);
if (crev != 9) {
pr_err("Unsupported IGP table: %d %d\n", frev, crev);
if (info == NULL) {
pr_err("Could not retrieve the Integrated System Info Table!\n");
return -EINVAL;
}
if (info == NULL) {
pr_err("Could not retrieve the Integrated System Info Table!\n");
if (crev != 9) {
pr_err("Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}


@ -33,7 +33,6 @@
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega12_powertune.h"
#include "vega12_inc.h"
#include "pp_soc15.h"
#include "pppcielanes.h"
@ -893,6 +892,28 @@ static int vega12_odn_initialize_default_settings(
return 0;
}
static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
{
int adjust_percent, result = 0;
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
adjust_percent =
hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
result = vega12_set_overdrive_target_percentage(hwmgr,
(uint32_t)adjust_percent);
}
return result;
}
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;

File diff suppressed because it is too large.


@ -1,53 +0,0 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _VEGA12_POWERTUNE_H_
#define _VEGA12_POWERTUNE_H_
enum vega12_didt_config_reg_type {
VEGA12_CONFIGREG_DIDT = 0,
VEGA12_CONFIGREG_GCCAC,
VEGA12_CONFIGREG_SECAC
};
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
struct vega12_didt_config_reg {
uint32_t offset;
uint32_t mask;
uint32_t shift;
uint32_t value;
};
int vega12_enable_power_containment(struct pp_hwmgr *hwmgr);
int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int vega12_power_control_set_level(struct pp_hwmgr *hwmgr);
int vega12_disable_power_containment(struct pp_hwmgr *hwmgr);
int vega12_enable_didt_config(struct pp_hwmgr *hwmgr);
int vega12_disable_didt_config(struct pp_hwmgr *hwmgr);
#endif /* _VEGA12_POWERTUNE_H_ */


@ -208,9 +208,9 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->LedPin1 = smc_dpm_table.ledpin1;
ppsmc_pptable->LedPin2 = smc_dpm_table.ledpin2;
ppsmc_pptable->GfxclkSpreadEnabled = smc_dpm_table.gfxclkspreadenabled;
ppsmc_pptable->GfxclkSpreadPercent = smc_dpm_table.gfxclkspreadpercent;
ppsmc_pptable->GfxclkSpreadFreq = smc_dpm_table.gfxclkspreadfreq;
ppsmc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table.pllgfxclkspreadenabled;
ppsmc_pptable->PllGfxclkSpreadPercent = smc_dpm_table.pllgfxclkspreadpercent;
ppsmc_pptable->PllGfxclkSpreadFreq = smc_dpm_table.pllgfxclkspreadfreq;
ppsmc_pptable->UclkSpreadEnabled = 0;
ppsmc_pptable->UclkSpreadPercent = smc_dpm_table.uclkspreadpercent;
@ -220,6 +220,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->SocclkSpreadPercent = smc_dpm_table.socclkspreadpercent;
ppsmc_pptable->SocclkSpreadFreq = smc_dpm_table.socclkspreadfreq;
ppsmc_pptable->AcgGfxclkSpreadEnabled = smc_dpm_table.acggfxclkspreadenabled;
ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
return 0;
}


@ -127,7 +127,7 @@
#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT )
#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT )
#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT)
#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
@ -481,9 +481,9 @@ typedef struct {
uint8_t padding8_4;
uint8_t GfxclkSpreadEnabled;
uint8_t GfxclkSpreadPercent;
uint16_t GfxclkSpreadFreq;
uint8_t PllGfxclkSpreadEnabled;
uint8_t PllGfxclkSpreadPercent;
uint16_t PllGfxclkSpreadFreq;
uint8_t UclkSpreadEnabled;
uint8_t UclkSpreadPercent;
@ -493,7 +493,11 @@ typedef struct {
uint8_t SocclkSpreadPercent;
uint16_t SocclkSpreadFreq;
uint32_t BoardReserved[3];
uint8_t AcgGfxclkSpreadEnabled;
uint8_t AcgGfxclkSpreadPercent;
uint16_t AcgGfxclkSpreadFreq;
uint32_t BoardReserved[10];
uint32_t MmHubPadding[7];


@ -30,8 +30,7 @@
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_smumgr.h"
/* MP Apertures */
#define MP0_Public 0x03800000
@ -392,8 +391,7 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr)
struct cgs_firmware_info info = {0};
int ret;
ret = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU,
&info);
if (ret || !info.kptr)
return -EINVAL;


@ -142,7 +142,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
}
struct dpi_clk_calc_ctx {
struct dss_pll *pll;
struct dpi_data *dpi;
unsigned int clkout_idx;
/* inputs */
@ -191,7 +191,7 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
return dispc_div_calc(ctx->pll->dss->dispc, dispc,
return dispc_div_calc(ctx->dpi->dss->dispc, dispc,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
@ -208,8 +208,8 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
ctx->pll_cinfo.fint = fint;
ctx->pll_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
ctx->pck_min, dss_get_max_fck_rate(ctx->pll->dss),
return dss_pll_hsdiv_calc_a(ctx->dpi->pll, clkdco,
ctx->pck_min, dss_get_max_fck_rate(ctx->dpi->dss),
dpi_calc_hsdiv_cb, ctx);
}
@ -219,7 +219,7 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
ctx->fck = fck;
return dispc_div_calc(ctx->pll->dss->dispc, fck,
return dispc_div_calc(ctx->dpi->dss->dispc, fck,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
@ -230,7 +230,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
unsigned long clkin;
memset(ctx, 0, sizeof(*ctx));
ctx->pll = dpi->pll;
ctx->dpi = dpi;
ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
clkin = clk_get_rate(dpi->pll->clkin);
@ -244,7 +244,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
pll_min = 0;
pll_max = 0;
return dss_pll_calc_a(ctx->pll, clkin,
return dss_pll_calc_a(ctx->dpi->pll, clkin,
pll_min, pll_max,
dpi_calc_pll_cb, ctx);
} else { /* DSS_PLL_TYPE_B */
@ -275,6 +275,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
bool ok;
memset(ctx, 0, sizeof(*ctx));
ctx->dpi = dpi;
if (pck > 1000 * i * i * i)
ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
else


@ -140,6 +140,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
* https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
{ 0, 0, 0, 0, 0 },
};


@ -5912,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
{
u32 lane_width;
u32 new_lane_width =
(radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
(radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
radeon_set_pcie_lanes(rdev, new_lane_width);


@ -987,7 +987,7 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
#define azx_del_card_list(chip) /* NOP */
#endif /* CONFIG_PM */
#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
@ -1068,9 +1068,7 @@ static int azx_resume(struct device *dev)
trace_azx_resume(chip);
return 0;
}
#endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
#ifdef CONFIG_PM_SLEEP
/* put codec down to D3 at hibernation for Intel SKL+;
* otherwise BIOS may still access the codec and screw up the driver
*/