gpu: nvgpu: ce: check address ranges before exec

The source and destination addresses are masked to the low 40 bits
only. Make sure that the input parameters do not exceed that range;
crossing it would mean a bug somewhere on the caller side. Silently
truncating the values could cause unexpected behaviour, even though no
device actually has that much memory.

Also rename src_buf and dst_buf to src_paddr and dst_paddr to
emphasize that the addresses are GPU physical addresses.
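
As an illustration only (not part of the diff below), a minimal sketch of
the new rejection path, assuming NVGPU_CE_MAX_ADDRESS covers exactly the
low 40 bits; its real definition lives in the CE headers and is not shown
in this change:

	/* sketch only: assumed value for the 40-bit limit */
	#define NVGPU_CE_MAX_ADDRESS	((1ULL << 40U) - 1ULL)

	if (src_paddr > NVGPU_CE_MAX_ADDRESS || dst_paddr > NVGPU_CE_MAX_ADDRESS) {
		/* reject rather than silently truncate to 40 bits */
		ret = -EINVAL;
		goto end;
	}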

Jira NVGPU-5172

Change-Id: I30653bf93791517991d04e4ba43220b5b541f581
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2402031
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Konsta Hölttä <kholtta@nvidia.com>
Date:         2020-08-21 15:35:11 +03:00
Committed by: Alex Waterman
Commit:       1117ea1286
Parent:       aafc9a4511

3 changed files with 30 additions and 17 deletions

@@ -52,8 +52,8 @@ static inline u32 nvgpu_ce_get_valid_launch_flags(struct gk20a *g,
 int nvgpu_ce_execute_ops(struct gk20a *g,
 		u32 ce_ctx_id,
-		u64 src_buf,
-		u64 dst_buf,
+		u64 src_paddr,
+		u64 dst_paddr,
 		u64 size,
 		u32 payload,
 		u32 launch_flags,
@@ -90,6 +90,16 @@ int nvgpu_ce_execute_ops(struct gk20a *g,
 		goto end;
 	}
 
+	if (src_paddr > NVGPU_CE_MAX_ADDRESS) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (dst_paddr > NVGPU_CE_MAX_ADDRESS) {
+		ret = -EINVAL;
+		goto end;
+	}
+
 	nvgpu_mutex_acquire(&ce_app->app_mutex);
 	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
@@ -140,8 +150,8 @@ int nvgpu_ce_execute_ops(struct gk20a *g,
(u64)(cmd_buf_read_offset * sizeof(u32)));
dma_copy_class = g->ops.get_litter_value(g, GPU_LIT_DMA_COPY_CLASS);
method_size = nvgpu_ce_prepare_submit(src_buf,
dst_buf,
method_size = nvgpu_ce_prepare_submit(src_paddr,
dst_paddr,
size,
&cmd_buf_cpu_va[cmd_buf_read_offset],
payload,
@@ -235,7 +245,7 @@ static void nvgpu_ce_delete_gpu_context_locked(struct nvgpu_ce_gpu_ctx *ce_ctx)
 }
 
 static u32 nvgpu_prepare_ce_op(u32 *cmd_buf_cpu_va,
-		u64 src_buf, u64 dst_buf,
+		u64 src_paddr, u64 dst_paddr,
 		u32 width, u32 height, u32 payload,
 		bool mode_transfer, u32 launch_flags)
 {
@@ -245,9 +255,9 @@ static u32 nvgpu_prepare_ce_op(u32 *cmd_buf_cpu_va,
 	if (mode_transfer) {
 		/* setup the source */
 		cmd_buf_cpu_va[methodSize++] = 0x20028100;
-		cmd_buf_cpu_va[methodSize++] = (u64_hi32(src_buf) &
+		cmd_buf_cpu_va[methodSize++] = (u64_hi32(src_paddr) &
 			NVGPU_CE_UPPER_ADDRESS_OFFSET_MASK);
-		cmd_buf_cpu_va[methodSize++] = (u64_lo32(src_buf) &
+		cmd_buf_cpu_va[methodSize++] = (u64_lo32(src_paddr) &
 			NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK);
 
 		cmd_buf_cpu_va[methodSize++] = 0x20018098;
@@ -275,9 +285,9 @@ static u32 nvgpu_prepare_ce_op(u32 *cmd_buf_cpu_va,
 	/* setup the destination/output */
 	cmd_buf_cpu_va[methodSize++] = 0x20068102;
-	cmd_buf_cpu_va[methodSize++] = (u64_hi32(dst_buf) &
+	cmd_buf_cpu_va[methodSize++] = (u64_hi32(dst_paddr) &
 		NVGPU_CE_UPPER_ADDRESS_OFFSET_MASK);
-	cmd_buf_cpu_va[methodSize++] = (u64_lo32(dst_buf) &
+	cmd_buf_cpu_va[methodSize++] = (u64_lo32(dst_paddr) &
 		NVGPU_CE_LOWER_ADDRESS_OFFSET_MASK);
 
 	/* Pitch in/out */
 	cmd_buf_cpu_va[methodSize++] = width;
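
For context, a sketch (assumptions, not taken from this diff) of why the
earlier range check matters at this point: only the low 40 bits of each
address survive the mask pair, with the upper word assumed to keep bits
39..32 and the lower word bits 31..0:

	/* sketch only: assumed mask widths for a 40-bit CE physical address */
	u32 upper = u64_hi32(dst_paddr) & 0xffU;	/* bits 39..32 */
	u32 lower = u64_lo32(dst_paddr) & 0xffffffffU;	/* bits 31..0 */
	/* anything above bit 39 would be dropped here, hence the -EINVAL check */
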
@@ -318,8 +328,8 @@ static u32 nvgpu_prepare_ce_op(u32 *cmd_buf_cpu_va,
 	return methodSize;
 }
 
-u32 nvgpu_ce_prepare_submit(u64 src_buf,
-		u64 dst_buf,
+u32 nvgpu_ce_prepare_submit(u64 src_paddr,
+		u64 dst_paddr,
 		u64 size,
 		u32 *cmd_buf_cpu_va,
 		u32 payload,
@@ -364,14 +374,14 @@ u32 nvgpu_ce_prepare_submit(u64 src_buf,
 	if (low != 0U) {
 		/* do the low bytes in one long line */
 		methodSize += nvgpu_prepare_ce_op(&cmd_buf_cpu_va[methodSize],
-				src_buf, dst_buf,
+				src_paddr, dst_paddr,
 				nvgpu_safe_cast_u64_to_u32(low), 1,
 				payload, mode_transfer, launch_flags);
 	}
 	if (hi != 0U) {
 		/* do the high bytes in many 2G lines */
 		methodSize += nvgpu_prepare_ce_op(&cmd_buf_cpu_va[methodSize],
-				src_buf + low, dst_buf + low,
+				src_paddr + low, dst_paddr + low,
 				0x80000000ULL, nvgpu_safe_cast_u64_to_u32(hi),
 				payload, mode_transfer, launch_flags);
 	}