drm/amdgpu/vcn: identify unified queue in sw init
commit ecfa23c8df upstream.
Determine whether VCN is using a unified queue in sw_init, instead of
re-deriving it in helper functions later on.
v2: fix coding style
Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e0aeb26b04
commit 44bb8f18a6
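The pattern is easiest to see in miniature before reading the diff: compute
the unified-queue decision once at init time and cache it, rather than
re-deriving it from the IP version on every submission. The sketch below is
illustrative only; vcn_state, vcn_sw_init(), vcn_msg_ib_size_dw() and the
simplified IP_VERSION() packing are stand-ins, not the kernel's actual types
or macros.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's IP_VERSION() packing. */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

/* Simplified stand-in for the relevant fields of struct amdgpu_vcn. */
struct vcn_state {
	uint32_t ip_version;      /* VCN IP version of this device */
	bool using_unified_queue; /* cached once at sw_init */
};

/* Before the patch: a helper re-derived the answer on every call. */
static bool vcn_using_unified_queue(const struct vcn_state *vcn)
{
	return vcn->ip_version >= IP_VERSION(4, 0, 0);
}

/* After the patch: decide once during software init and cache it... */
static void vcn_sw_init(struct vcn_state *vcn)
{
	/* from vcn4 and above, only unified queue is used */
	vcn->using_unified_queue = vcn->ip_version >= IP_VERSION(4, 0, 0);
}

/* ...so submission paths only test the cached flag. Unified-queue
 * messages reserve 8 extra dwords for header and checksum packets. */
static uint32_t vcn_msg_ib_size_dw(const struct vcn_state *vcn,
				   uint32_t base_dw)
{
	return vcn->using_unified_queue ? base_dw + 8 : base_dw;
}

int main(void)
{
	struct vcn_state vcn = { .ip_version = IP_VERSION(4, 0, 0) };

	vcn_sw_init(&vcn);
	printf("helper: %d cached: %d ib size: %u dw\n",
	       vcn_using_unified_queue(&vcn), vcn.using_unified_queue,
	       vcn_msg_ib_size_dw(&vcn, 16));
	return 0;
}

Caching the flag in struct amdgpu_vcn gives every decode/encode path a single
source of truth and lets the patch delete the per-ring
amdgpu_vcn_using_unified_queue() helper, as the diff below shows.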
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -135,6 +135,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		}
 	}
 
+	/* from vcn4 and above, only unified queue is used */
+	adev->vcn.using_unified_queue =
+		adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0);
+
 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
@@ -259,18 +263,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool ret = false;
-
-	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
-		ret = true;
-
-	return ret;
-}
-
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
 	bool ret = false;
@@ -707,12 +699,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	uint32_t *ib_checksum;
 	uint32_t ib_pack_in_dw;
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -725,7 +716,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	ib->length_dw = 0;
 
 	/* single queue headers */
-	if (sq) {
+	if (adev->vcn.using_unified_queue) {
 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 						+ 4 + 2; /* engine info + decoding ib in dw */
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -744,7 +735,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -834,15 +825,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 					 struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -856,7 +847,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -878,7 +869,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -901,15 +892,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 					  struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -923,7 +914,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -945,7 +936,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -284,6 +284,7 @@ struct amdgpu_vcn {
 
 	uint16_t inst_mask;
 	uint8_t num_inst_per_aid;
+	bool using_unified_queue;
 };
 
 struct amdgpu_fw_shared_rb_ptrs_struct {