Revert "gpu: nvgpu: handle falcon copy pointer alignment for misra 11.3 deviation"

This reverts commit 13a7ef2cc7.

The bios devinit sequence for tu104 was the one case that hit the
unaligned buffer scenario, and that functionality has since been
removed from nvgpu. Beyond that, there are no firmwares for which the
input/output buffer addresses are expected to be unaligned, so the
logic added to handle unaligned addresses is removed.

JIRA NVGPU-3271

Change-Id: Ifd24cc5b50b9d2548878436befb2220e7bf02ed4
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2161735
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Sagar Kamble
Date:      2019-07-26 11:07:04 +05:30
Committer: mobile promotions
Commit:    715f29ea9f (parent 77051a8c86)

4 changed files with 17 additions and 80 deletions
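
Note on the resulting contract (a sketch, not part of the change itself): with
the unaligned fallback removed, the byte-pointer buffers handed to the falcon
copy helpers are simply assumed to be 4-byte aligned by their callers. The
standalone C sketch below illustrates that assumption; the names STAGING_WORDS
and staging are illustrative only and do not appear in nvgpu.

/* Illustrative sketch only -- not nvgpu code. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define STAGING_WORDS 64U

int main(void)
{
	/* Typing the buffer as uint32_t guarantees 4-byte alignment. */
	uint32_t staging[STAGING_WORDS];
	uint8_t *dst = (uint8_t *)staging;

	/* Same property the removed nvgpu_mem_is_word_aligned() checked. */
	assert(((uintptr_t)dst % 4U) == 0U);

	memset(staging, 0, sizeof(staging));
	return 0;
}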


@@ -77,13 +77,3 @@ int nvgpu_strnadd_u32(char *dst, const u32 value, size_t size, u32 radix)
 
 	return n;
 }
-
-bool nvgpu_mem_is_word_aligned(struct gk20a *g, u8 *addr)
-{
-	if ((unsigned long)addr % 4UL != 0UL) {
-		nvgpu_log_info(g, "addr (%p) not 4-byte aligned", addr);
-		return false;
-	}
-
-	return true;
-}


@@ -88,18 +88,10 @@ int gk20a_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
 	nvgpu_writel(g, base_addr + falcon_falcon_dmemc_r(port),
 		src | falcon_falcon_dmemc_aincr_f(1));
 
-	if (unlikely(!nvgpu_mem_is_word_aligned(g, dst))) {
-		for (i = 0; i < words; i++) {
-			data = nvgpu_readl(g,
-				base_addr + falcon_falcon_dmemd_r(port));
-			nvgpu_memcpy(&dst[i * 4U], (u8 *)&data, 4);
-		}
-	} else {
 	for (i = 0; i < words; i++) {
 		dst_u32[i] = nvgpu_readl(g,
 			base_addr + falcon_falcon_dmemd_r(port));
 	}
-	}
 
 	if (bytes > 0U) {
 		data = nvgpu_readl(g, base_addr + falcon_falcon_dmemd_r(port));
@@ -135,18 +127,10 @@ int gk20a_falcon_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
 		falcon_falcon_imemc_blk_f(blk) |
 		falcon_falcon_dmemc_aincr_f(1));
 
-	if (unlikely(!nvgpu_mem_is_word_aligned(g, dst))) {
-		for (i = 0; i < words; i++) {
-			data = nvgpu_readl(g,
-				base_addr + falcon_falcon_imemd_r(port));
-			nvgpu_memcpy(&dst[i * 4U], (u8 *)&data, 4);
-		}
-	} else {
 	for (i = 0; i < words; i++) {
 		dst_u32[i] = nvgpu_readl(g,
 			base_addr + falcon_falcon_imemd_r(port));
 	}
-	}
 
 	if (bytes > 0U) {
 		data = nvgpu_readl(g, base_addr + falcon_falcon_imemd_r(port));
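
For readers outside the driver, the hunks above boil down to two copy styles.
The standalone C sketch below (not nvgpu code; fake_dmem, copy_aligned and
copy_unaligned are illustrative names) contrasts the surviving aligned path,
which casts the byte pointer to a word pointer -- the MISRA rule 11.3
deviation named in the title -- with the removed byte-wise fallback:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t fake_dmem[4] = {
	0x11111111U, 0x22222222U, 0x33333333U, 0x44444444U
};

/* Surviving path: cast the byte pointer to a word pointer and copy whole
 * words; only valid when dst is 4-byte aligned. */
static void copy_aligned(uint8_t *dst, uint32_t words)
{
	uint32_t *dst_u32 = (uint32_t *)dst;
	uint32_t i;

	for (i = 0; i < words; i++) {
		dst_u32[i] = fake_dmem[i];
	}
}

/* Removed fallback: stage each word and memcpy it byte-wise, so dst may
 * live at any address. */
static void copy_unaligned(uint8_t *dst, uint32_t words)
{
	uint32_t i;
	uint32_t data;

	for (i = 0; i < words; i++) {
		data = fake_dmem[i];
		memcpy(&dst[i * 4U], &data, 4U);
	}
}

int main(void)
{
	uint32_t buf_words[4];
	uint8_t *buf = (uint8_t *)buf_words;

	copy_aligned(buf, 4U);      /* OK: buf_words is word aligned      */
	copy_unaligned(buf, 4U);    /* works for any alignment of dst     */
	printf("first byte: 0x%02x\n", buf[0]);
	return 0;
}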


@@ -149,18 +149,10 @@ int gk20a_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
 	nvgpu_writel(g, base_addr + falcon_falcon_dmemc_r(port),
 		dst | falcon_falcon_dmemc_aincw_f(1));
 
-	if (unlikely(!nvgpu_mem_is_word_aligned(g, src))) {
-		for (i = 0; i < words; i++) {
-			nvgpu_memcpy((u8 *)&data, &src[i * 4U], 4);
-			nvgpu_writel(g, base_addr + falcon_falcon_dmemd_r(port),
-				data);
-		}
-	} else {
 	for (i = 0; i < words; i++) {
 		nvgpu_writel(g, base_addr + falcon_falcon_dmemd_r(port),
 			src_u32[i]);
 	}
-	}
 
 	if (bytes > 0U) {
 		data = 0;
@@ -186,7 +178,6 @@ int gk20a_falcon_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
 	u32 base_addr = flcn->flcn_base;
 	u32 *src_u32 = (u32 *)src;
 	u32 words = 0;
-	u32 data = 0;
 	u32 blk = 0;
 	u32 i = 0;
@@ -205,22 +196,6 @@ int gk20a_falcon_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
 		falcon_falcon_imemc_aincw_f(1) |
 		falcon_falcon_imemc_secure_f(sec ? 1U : 0U));
 
-	if (unlikely(!nvgpu_mem_is_word_aligned(g, src))) {
-		for (i = 0U; i < words; i++) {
-			if (i % 64U == 0U) {
-				/* tag is always 256B aligned */
-				nvgpu_writel(g,
-					base_addr + falcon_falcon_imemt_r(0),
-					tag);
-				tag++;
-			}
-
-			nvgpu_memcpy((u8 *)&data, &src[i * 4U], 4);
-			nvgpu_writel(g,
-				base_addr + falcon_falcon_imemd_r(port),
-				data);
-		}
-	} else {
 	for (i = 0U; i < words; i++) {
 		if (i % 64U == 0U) {
 			/* tag is always 256B aligned */
@@ -233,7 +208,6 @@ int gk20a_falcon_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
 		nvgpu_writel(g, base_addr + falcon_falcon_imemd_r(port),
 			src_u32[i]);
 	}
-	}
 
 	/* WARNING : setting remaining bytes in block to 0x0 */
 	while (i % 64U != 0U) {
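
The copy_to_imem hunk also shows the IMEM tagging scheme kept by the revert: a
tag register write every 64 words, i.e. once per 256-byte block. A tiny
standalone sketch of that arithmetic (illustrative only, not nvgpu code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size_bytes = 1000U;
	uint32_t words = size_bytes / 4U;          /* whole 32-bit words  */
	uint32_t tag_writes = (words + 63U) / 64U; /* one per 256B block  */

	printf("%u words -> %u IMEM tag writes\n",
	       (unsigned)words, (unsigned)tag_writes);
	return 0;
}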


@@ -29,8 +29,6 @@
 #include <linux/string.h>
 #endif
 
-struct gk20a;
-
 /**
  * nvgpu_memcpy - Copy memory buffer
  *
@@ -65,13 +63,4 @@ int nvgpu_memcmp(const u8 *b1, const u8 *b2, size_t n);
  */
 int nvgpu_strnadd_u32(char *dst, const u32 value, size_t size, u32 radix);
 
-/**
- * nvgpu_mem_is_word_aligned - Check that memory address is word (4-byte)
- *                             aligned.
- *
- * @g - struct gk20a.
- * @addr - memory address.
- */
-bool nvgpu_mem_is_word_aligned(struct gk20a *g, u8 *addr);
-
 #endif /* NVGPU_STRING_H */