drm/amdgpu: have bos for PDs/PTs cpu accessible when kfd uses cpu to update vm

When KFD uses the CPU to update a VM, iterate over all current PD/PT BOs,
add the AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED flag and kmap them into the
kernel virtual address space before KFD updates the VM that was created by GFX.

Signed-off-by: Xiaogang Chen <Xiaogang.Chen@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Xiaogang Chen 2023-06-30 11:38:35 -05:00 committed by Alex Deucher
parent 9041b53a59
commit eb58ad143d
4 changed files with 35 additions and 7 deletions

View File

@ -2279,16 +2279,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
goto unreserve_bo;
vm->update_funcs = &amdgpu_vm_cpu_funcs;
r = amdgpu_vm_pt_map_tables(adev, vm);
if (r)
goto unreserve_bo;
} else {
vm->update_funcs = &amdgpu_vm_sdma_funcs;
}
/*
* Make sure root PD gets mapped. As vm_update_mode could be changed
* when turning a GFX VM into a compute VM.
*/
r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo));
if (r)
goto unreserve_bo;
dma_fence_put(vm->last_update);
vm->last_update = dma_fence_get_stub();

View File

@ -497,6 +497,8 @@ void amdgpu_vm_pt_free_work(struct work_struct *work);
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
/**
* amdgpu_vm_tlb_seq - return tlb flush sequence number
* @vm: the amdgpu_vm structure to query

View File

@ -31,6 +31,7 @@
*/
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
return amdgpu_bo_kmap(&table->bo, NULL);
}

View File

@ -1075,3 +1075,31 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
return 0;
}
/**
 * amdgpu_vm_pt_map_tables - make the page table hierarchy CPU accessible
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Walk the root page directory and everything below it, asking the VM's
 * current update backend to map each page table BO for CPU access.
 *
 * Returns:
 * 0 on success, or the first error returned by map_table().
 */
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		int r;

		/* Cursor entries without a BO have nothing to map. */
		if (!entry->bo)
			continue;

		r = vm->update_funcs->map_table(to_amdgpu_bo_vm(entry->bo));
		if (r)
			return r;
	}

	return 0;
}