Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced 2025-12-23 01:50:07 +03:00
gpu: nvgpu: fix MISRA errors in nvgpu.common.mm
Rule 11.3 forbids casting a pointer between two different object types.
Rule 13.5 does not allow the right-hand operand of a logical && or ||
operator to have persistent side effects. This patch fixes violations of
these rules in nvgpu.common.mm.

Jira NVGPU-3864

Change-Id: I08b7fb4d3fb623f14f8760a50648b39b3e53b233
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2168522
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 88ab1b389c
parent a20739c1f6
committed by mobile promotions
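For context before the diff, here is a minimal standalone sketch of the Rule
13.5 pattern the patch targets. All names below (probe, lookup_count,
check_bad, check_good) are illustrative only, not nvgpu code:

/* MISRA C:2012 Rule 13.5 sketch: the right-hand operand of && or ||
 * must not have persistent side effects. Illustrative code only. */
#include <stdbool.h>

static int lookup_count;

/* Persistent side effect: writes a file-scope variable. */
static bool probe(int key)
{
	lookup_count++;
	return key > 0;
}

/* Non-compliant: whether probe() runs (and lookup_count changes)
 * depends on short-circuit evaluation of &&. */
static bool check_bad(bool enabled, int key)
{
	return enabled && probe(key);
}

/* Compliant: nest the conditions so each operand of a decision is
 * side-effect free; this is the same shape the patch uses below in
 * nvgpu_vm_area_find() and nvgpu_vm_area_validate_buffer(). */
static bool check_good(bool enabled, int key)
{
	bool hit = false;

	if (enabled) {
		if (probe(key)) {
			hit = true;
		}
	}
	return hit;
}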
@@ -32,11 +32,13 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
 
 	nvgpu_list_for_each_entry(vm_area, &vm->vm_area_list,
 			nvgpu_vm_area, vm_area_list) {
-		if (addr >= vm_area->addr &&
-		    addr < nvgpu_safe_add_u64(vm_area->addr, vm_area->size)) {
-			return vm_area;
-		}
+		if (addr >= vm_area->addr) {
+			if (addr < nvgpu_safe_add_u64(vm_area->addr,
+						vm_area->size)) {
+				return vm_area;
+			}
+		}
 	}
 
 	return NULL;
 }
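The nested-if rewrite is what makes this lookup compliant: the call to
nvgpu_safe_add_u64() is evidently not side-effect free (a checked add
typically traps on overflow, and trapping is a persistent side effect), so
it may not appear as the right-hand operand of &&. A hypothetical sketch of
what such a checked-add helper can look like; this is an assumption for
illustration, not the actual nvgpu implementation:

/* Hypothetical checked 64-bit add; the real nvgpu_safe_add_u64() may
 * differ. Assumes kernel context (u64, BUG() from <linux/bug.h>).
 * BUG() halts the kernel, which MISRA counts as a persistent side
 * effect, hence Rule 13.5 bars this call from the RHS of &&. */
static inline u64 checked_add_u64(u64 a, u64 b)
{
	u64 sum = a + b;

	if (sum < a) {		/* unsigned wrap-around means overflow */
		BUG();
	}
	return sum;
}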
@@ -73,22 +75,26 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
 	}
 
 	/* Mapped area should fit inside va, if there's one */
-	if (vm_area != NULL && map_end > nvgpu_safe_add_u64(vm_area->addr,
-			vm_area->size)) {
-		nvgpu_warn(g, "fixed offset mapping size overflows va node");
-		return -EINVAL;
+	if (vm_area != NULL) {
+		if (map_end > nvgpu_safe_add_u64(vm_area->addr,
+				vm_area->size)) {
+			nvgpu_warn(g,
+				"fixed offset mapping size overflows va node");
+			return -EINVAL;
+		}
 	}
 
 	/* check that this mapping does not collide with existing
 	 * mappings by checking the buffer with the highest GPU VA
 	 * that is less than our buffer end */
 	buffer = nvgpu_vm_find_mapped_buf_less_than(
 			vm, nvgpu_safe_add_u64(map_addr, map_size));
-	if (buffer != NULL &&
-		nvgpu_safe_add_u64(buffer->addr, buffer->size) > map_addr) {
-		nvgpu_warn(g, "overlapping buffer map requested");
-		return -EINVAL;
+	if (buffer != NULL) {
+		if (nvgpu_safe_add_u64(buffer->addr, buffer->size) > map_addr) {
+			nvgpu_warn(g, "overlapping buffer map requested");
+			return -EINVAL;
+		}
 	}
 
 	*pvm_area = vm_area;
 
@@ -276,7 +276,7 @@ static inline bool nvgpu_atomic64_sub_and_test_impl(long x, nvgpu_atomic64_t *v)
 ({ \
 	typeof(*(p)) tmp = (old); \
 	\
-	(void) nvgpu_atomic_cmpxchg((nvgpu_atomic_t *) (p), tmp,\
+	(void) nvgpu_atomic_cmpxchg((nvgpu_atomic_t *)(void *)(p), tmp,\
 		(new)); \
 	tmp; \
 })
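The Rule 11.3 fix above inserts an intermediate (void *): casting directly
between two different object pointer types violates 11.3, whereas a
conversion to and from a pointer to void is covered by Rule 11.5 instead. A
minimal sketch of the pattern, with illustrative types (struct wrapped,
read_counter) that are not part of nvgpu:

/* MISRA C:2012 Rule 11.3 sketch: illustrative types only. */
#include <stdint.h>

struct wrapped { int32_t v; };

static int32_t read_counter(long *p)
{
	/* Non-compliant: direct cast between different object
	 * pointer types.
	 *
	 *     struct wrapped *w = (struct wrapped *)p;
	 *
	 * Avoids a Rule 11.3 violation: convert via a pointer to void,
	 * as the patch does with (nvgpu_atomic_t *)(void *)(p). The
	 * caller must still guarantee alignment and compatible layout. */
	struct wrapped *w = (struct wrapped *)(void *)p;

	return w->v;
}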