Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: mm: fix CERT-C INT30 violations in page_table
Add wrap checks for CERT-C INT30 in page_table.c.

JIRA NVGPU-3515

Change-Id: I102364c82d2b36ecbc6f7f53bce9a8ba71875f15
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2125025
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 377e1e8f36
commit 067ca56db7
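Note: the helpers introduced below (nvgpu_safe_add_u32/u64, nvgpu_safe_sub_u32/u64, nvgpu_safe_mult_u32/u64) come from the newly included <nvgpu/safe_ops.h>; their definitions are not part of this diff. As a rough illustration of the CERT-C INT30 pattern they stand for, a checked unsigned add can be sketched as below; the helper name and the assert-on-wrap policy are illustrative assumptions, not the actual nvgpu implementation.

    #include <stdint.h>
    #include <assert.h>

    /* Illustrative sketch only: add two u32 values, trapping instead of wrapping. */
    static inline uint32_t safe_add_u32_sketch(uint32_t a, uint32_t b)
    {
            /* CERT-C INT30: unsigned a + b wraps exactly when a > UINT32_MAX - b. */
            assert(a <= UINT32_MAX - b);
            return a + b;
    }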
@@ -35,6 +35,7 @@
 #include <nvgpu/sizes.h>
 #include <nvgpu/types.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/safe_ops.h>


 #define nvgpu_gmmu_dbg(g, attrs, fmt, args...) \
@@ -230,7 +231,13 @@ static u32 pd_entries(const struct gk20a_mmu_level *l,
          * used to index the page directory. That is simply 2 raised to the
          * number of bits.
          */
-        return BIT32(l->hi_bit[attrs->pgsz] - l->lo_bit[attrs->pgsz] + 1U);
+        u32 bit;
+
+        bit = nvgpu_safe_sub_u32(l->hi_bit[attrs->pgsz],
+                                 l->lo_bit[attrs->pgsz]);
+        bit = nvgpu_safe_add_u32(bit, 1U);
+        return BIT32(bit);
 }

 /*
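Note: the hazard in pd_entries() is the intermediate hi_bit - lo_bit term; if a level were ever mis-programmed with hi_bit < lo_bit, the unsigned subtraction would wrap silently before BIT32() is applied, whereas nvgpu_safe_sub_u32() can flag it. A minimal stand-alone demonstration of that wrap (plain C, not nvgpu code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t hi = 3U, lo = 7U;

            /* Unsigned subtraction never traps; it wraps modulo 2^32. */
            printf("%u\n", hi - lo);        /* prints 4294967292 */
            return 0;
    }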
@@ -239,7 +246,7 @@ static u32 pd_entries(const struct gk20a_mmu_level *l,
 static u32 pd_size(const struct gk20a_mmu_level *l,
                    struct nvgpu_gmmu_attrs *attrs)
 {
-        return pd_entries(l, attrs) * l->entry_size;
+        return nvgpu_safe_mult_u32(pd_entries(l, attrs), l->entry_size);
 }

 /*
@@ -290,10 +297,13 @@ static int pd_allocate(struct vm_gk20a *vm,
 static u32 pd_index(const struct gk20a_mmu_level *l, u64 virt,
                     struct nvgpu_gmmu_attrs *attrs)
 {
-        u64 pd_mask = (1ULL << ((u64)l->hi_bit[attrs->pgsz] + 1U)) - 1ULL;
+        u64 pd_mask;
         u32 pd_shift = l->lo_bit[attrs->pgsz];
         u64 tmp_index;

+        pd_mask = BIT64(nvgpu_safe_add_u64((u64)l->hi_bit[attrs->pgsz], 1ULL));
+        pd_mask = nvgpu_safe_sub_u64(pd_mask, 1ULL);
+
         /*
          * For convenience we don't bother computing the lower bound of the
          * mask; it's easier to just shift it off.
@@ -345,8 +355,9 @@ static int pd_allocate_children(struct vm_gk20a *vm,
         }

         pd->num_entries = pd_entries(l, attrs);
-        pd->entries = nvgpu_vzalloc(g, sizeof(struct nvgpu_gmmu_pd) *
-                                    (unsigned long)pd->num_entries);
+        pd->entries = nvgpu_vzalloc(g,
+                        nvgpu_safe_mult_u64(sizeof(struct nvgpu_gmmu_pd),
+                                (unsigned long)pd->num_entries));
         if (pd->entries == NULL) {
                 pd->num_entries = 0;
                 return -ENOMEM;
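Note: routing sizeof(struct nvgpu_gmmu_pd) * pd->num_entries through nvgpu_safe_mult_u64() prevents an oversized entry count from wrapping into a too-small allocation. The usual INT30 check for unsigned multiplication is sketched below; the name and assert policy are illustrative assumptions, not the nvgpu implementation.

    #include <stdint.h>
    #include <assert.h>

    /* Illustrative sketch only: multiply two u64 values, trapping instead of wrapping. */
    static inline uint64_t safe_mult_u64_sketch(uint64_t a, uint64_t b)
    {
            /* a * b wraps exactly when b != 0 and a > UINT64_MAX / b. */
            assert(b == 0U || a <= UINT64_MAX / b);
            return a * b;
    }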
@@ -417,13 +428,16 @@ static int nvgpu_set_pd_level(struct vm_gk20a *vm,
                 u32 pd_idx = pd_index(l, virt_addr, attrs);
                 u64 chunk_size;
                 u64 target_addr;
+                u64 tmp_len;

                 /*
                  * Truncate the pde_range when the virtual address does not
                  * start at a PDE boundary.
                  */
-                chunk_size = min(length,
-                                 pde_range - (virt_addr & (pde_range - 1U)));
+                nvgpu_assert(pde_range >= 1ULL);
+                tmp_len = nvgpu_safe_sub_u64(pde_range,
+                                virt_addr & (pde_range - 1U));
+                chunk_size = min(length, tmp_len);

                 /*
                  * If the next level has an update_entry function then we know
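Note: the truncation arithmetic relies on pde_range being a power of two: virt_addr & (pde_range - 1U) is the offset into the current PDE, so subtracting it from pde_range gives the distance to the next PDE boundary, which is why the new nvgpu_assert() guards pde_range before the mask is formed. A small worked example with assumed values (plain C, not nvgpu code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pde_range = 0x200000U;   /* assumed 2 MB PDE span */
            uint64_t virt_addr = 0x10123000U; /* assumed, not PDE aligned */
            uint64_t offset = virt_addr & (pde_range - 1U);

            /* Distance from virt_addr to the next 2 MB boundary: 0xdd000. */
            printf("0x%llx\n", (unsigned long long)(pde_range - offset));
            return 0;
    }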
@@ -480,7 +494,7 @@ static int nvgpu_set_pd_level(struct vm_gk20a *vm,
                         }
                 }

-                virt_addr += chunk_size;
+                virt_addr = nvgpu_safe_add_u64(virt_addr, chunk_size);

                 /*
                  * Only add to phys_addr if it's non-zero. A zero value implies
@@ -547,7 +561,7 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
             is_iommuable && sgt_is_iommuable) {
                 u64 io_addr = nvgpu_sgt_get_gpu_addr(g, sgt, sgt->sgl, attrs);

-                io_addr += space_to_skip;
+                io_addr = nvgpu_safe_add_u64(io_addr, space_to_skip);

                 err = nvgpu_set_pd_level(vm, &vm->pdb,
                                          0,
@@ -620,8 +634,10 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
                  */
                 phys_addr = nvgpu_sgt_ipa_to_pa(g, sgt, sgl, ipa_addr,
                                                 &phys_length);
-                phys_addr = g->ops.mm.gmmu.gpu_phys_addr(g, attrs, phys_addr)
-                                + space_to_skip;
+                phys_addr = nvgpu_safe_add_u64(
+                                g->ops.mm.gmmu.gpu_phys_addr(g, attrs,
+                                        phys_addr),
+                                space_to_skip);

                 /*
                  * For virtualized OSes when phys_length is less than
@@ -630,7 +646,8 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
                  */
                 if (space_to_skip >= phys_length) {
                         space_to_skip -= phys_length;
-                        ipa_addr += phys_length;
+                        ipa_addr = nvgpu_safe_add_u64(ipa_addr,
+                                                      phys_length);
                         sgl_length -= phys_length;
                         continue;
                 }
@@ -652,10 +669,16 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
                 /*
                  * Update the map pointer and the remaining length.
                  */
-                virt_addr += mapped_sgl_length;
-                length -= mapped_sgl_length;
-                sgl_length -= mapped_sgl_length + space_to_skip;
-                ipa_addr += mapped_sgl_length + space_to_skip;
+                virt_addr = nvgpu_safe_add_u64(virt_addr,
+                                               mapped_sgl_length);
+                length = nvgpu_safe_sub_u64(length,
+                                            mapped_sgl_length);
+                sgl_length = nvgpu_safe_sub_u64(sgl_length,
+                                nvgpu_safe_add_u64(mapped_sgl_length,
+                                                   space_to_skip));
+                ipa_addr = nvgpu_safe_add_u64(ipa_addr,
+                                nvgpu_safe_add_u64(mapped_sgl_length,
+                                                   space_to_skip));

                 /*
                  * Space has been skipped so zero this for future
@@ -706,7 +729,8 @@ static int nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,

         page_size = vm->gmmu_page_sizes[attrs->pgsz];

-        if ((space_to_skip & (U64(page_size) - U64(1))) != 0ULL) {
+        if ((page_size == 0U) ||
+            (space_to_skip & (U64(page_size) - U64(1))) != 0ULL) {
                 return -EINVAL;
         }

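Note: the added page_size == 0U test guards the alignment mask itself; with an unsigned page_size of zero, U64(page_size) - U64(1) wraps to all ones, so the alignment test would no longer mean what it says. A tiny demonstration of that wrapped mask (plain C, not nvgpu code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t page_size = 0U;

            /* 0 - 1 wraps to 0xffffffffffffffff, i.e. a mask of all ones. */
            printf("0x%llx\n", (unsigned long long)(page_size - 1U));
            return 0;
    }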
@@ -806,7 +830,9 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
          * boundaries.
          */
         if (attrs.ctag != 0ULL) {
-                attrs.ctag += buffer_offset & (ctag_granularity - U64(1));
+                nvgpu_assert(ctag_granularity >= 1ULL);
+                attrs.ctag = nvgpu_safe_add_u64(attrs.ctag,
+                                buffer_offset & (ctag_granularity - U64(1)));
         }

         attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
@@ -981,14 +1007,17 @@ static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
          * since the PD may be located at an offset other than 0
          * (due to PD packing).
          */
-        pte_base = (u32)(pd->mem_offs / sizeof(u32)) +
-                nvgpu_pd_offset_from_index(l, pd_idx);
+        pte_base = nvgpu_safe_add_u32(
+                        (u32)(pd->mem_offs / sizeof(u32)),
+                        nvgpu_pd_offset_from_index(l, pd_idx));
         pte_size = (u32)(l->entry_size / sizeof(u32));

         if (data != NULL) {
                 for (i = 0; i < pte_size; i++) {
+                        u32 tmp_word = nvgpu_safe_add_u32(i,
+                                                          pte_base);
                         data[i] = nvgpu_mem_rd32(g, pd->mem,
-                                                 pte_base + i);
+                                                 tmp_word);
                 }
         }
