mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: change size related gpu_ops pointers
The return type of the function pointer *calc_global_ctx_buffer_size() is changed from int to u32, along with all of its implementations. The type of the size argument in *set_big_page_size() is changed from int to u32, along with all of its implementations. These changes are necessary because size should be an unsigned value. JIRA NVGPU-992 Change-Id: I3e4cd1d83749777aa8588a44a48772e26f190c4d Signed-off-by: Sai Nikhil <snikhil@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1950503 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Adeel Raza <araza@nvidia.com> Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
bc1ee5a281
commit
f215026a8f
@@ -2449,7 +2449,8 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
|
|||||||
int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
|
int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct gr_gk20a *gr = &g->gr;
|
struct gr_gk20a *gr = &g->gr;
|
||||||
int attr_buffer_size, err;
|
int err;
|
||||||
|
u32 attr_buffer_size;
|
||||||
|
|
||||||
u32 cb_buffer_size = gr->bundle_cb_default_size *
|
u32 cb_buffer_size = gr->bundle_cb_default_size *
|
||||||
gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
|
gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
|
||||||
@@ -2495,7 +2496,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
|
nvgpu_log_info(g, "attr_buffer_size : %u", attr_buffer_size);
|
||||||
|
|
||||||
err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
|
err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
|
||||||
attr_buffer_size);
|
attr_buffer_size);
|
||||||
|
|||||||
@@ -97,10 +97,10 @@ void gr_gm20b_cb_size_default(struct gk20a *g)
|
|||||||
gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
|
gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
|
||||||
}
|
}
|
||||||
|
|
||||||
int gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g)
|
u32 gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct gr_gk20a *gr = &g->gr;
|
struct gr_gk20a *gr = &g->gr;
|
||||||
int size;
|
u32 size;
|
||||||
|
|
||||||
gr->attrib_cb_size = gr->attrib_cb_default_size
|
gr->attrib_cb_size = gr->attrib_cb_default_size
|
||||||
+ (gr->attrib_cb_default_size >> 1);
|
+ (gr->attrib_cb_default_size >> 1);
|
||||||
|
|||||||
@@ -55,7 +55,7 @@ void gm20a_gr_disable_rd_coalesce(struct gk20a *g);
|
|||||||
void gr_gm20b_init_gpc_mmu(struct gk20a *g);
|
void gr_gm20b_init_gpc_mmu(struct gk20a *g);
|
||||||
void gr_gm20b_bundle_cb_defaults(struct gk20a *g);
|
void gr_gm20b_bundle_cb_defaults(struct gk20a *g);
|
||||||
void gr_gm20b_cb_size_default(struct gk20a *g);
|
void gr_gm20b_cb_size_default(struct gk20a *g);
|
||||||
int gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g);
|
u32 gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g);
|
||||||
void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
|
void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
|
||||||
struct nvgpu_gr_ctx *ch_ctx,
|
struct nvgpu_gr_ctx *ch_ctx,
|
||||||
u64 addr, u64 size, bool patch);
|
u64 addr, u64 size, bool patch);
|
||||||
|
|||||||
@@ -31,13 +31,13 @@
|
|||||||
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
|
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
|
||||||
|
|
||||||
void gm20b_mm_set_big_page_size(struct gk20a *g,
|
void gm20b_mm_set_big_page_size(struct gk20a *g,
|
||||||
struct nvgpu_mem *mem, int size)
|
struct nvgpu_mem *mem, u32 size)
|
||||||
{
|
{
|
||||||
u32 val;
|
u32 val;
|
||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
nvgpu_log_info(g, "big page size %d\n", size);
|
nvgpu_log_info(g, "big page size %u\n", size);
|
||||||
val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());
|
val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());
|
||||||
val &= ~ram_in_big_page_size_m();
|
val &= ~ram_in_big_page_size_m();
|
||||||
|
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ struct gk20a;
|
|||||||
#define PDE_ADDR_END(x, y) ((x) | ((0x1UL << (y)) - 1))
|
#define PDE_ADDR_END(x, y) ((x) | ((0x1UL << (y)) - 1))
|
||||||
|
|
||||||
void gm20b_mm_set_big_page_size(struct gk20a *g,
|
void gm20b_mm_set_big_page_size(struct gk20a *g,
|
||||||
struct nvgpu_mem *mem, int size);
|
struct nvgpu_mem *mem, u32 size);
|
||||||
u32 gm20b_mm_get_big_page_sizes(void);
|
u32 gm20b_mm_get_big_page_sizes(void);
|
||||||
u32 gm20b_mm_get_default_big_page_size(void);
|
u32 gm20b_mm_get_default_big_page_size(void);
|
||||||
bool gm20b_mm_support_sparse(struct gk20a *g);
|
bool gm20b_mm_support_sparse(struct gk20a *g);
|
||||||
|
|||||||
@@ -583,10 +583,10 @@ u32 gr_gp10b_pagepool_default_size(struct gk20a *g)
|
|||||||
return gr_scc_pagepool_total_pages_hwmax_value_v();
|
return gr_scc_pagepool_total_pages_hwmax_value_v();
|
||||||
}
|
}
|
||||||
|
|
||||||
int gr_gp10b_calc_global_ctx_buffer_size(struct gk20a *g)
|
u32 gr_gp10b_calc_global_ctx_buffer_size(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct gr_gk20a *gr = &g->gr;
|
struct gr_gk20a *gr = &g->gr;
|
||||||
int size;
|
u32 size;
|
||||||
|
|
||||||
gr->attrib_cb_size = gr->attrib_cb_default_size;
|
gr->attrib_cb_size = gr->attrib_cb_default_size;
|
||||||
gr->alpha_cb_size = gr->alpha_cb_default_size;
|
gr->alpha_cb_size = gr->alpha_cb_default_size;
|
||||||
@@ -1479,10 +1479,10 @@ void gr_gp10b_commit_global_attrib_cb(struct gk20a *g,
|
|||||||
struct nvgpu_gr_ctx *gr_ctx,
|
struct nvgpu_gr_ctx *gr_ctx,
|
||||||
u64 addr, bool patch)
|
u64 addr, bool patch)
|
||||||
{
|
{
|
||||||
int attrBufferSize;
|
u32 attrBufferSize;
|
||||||
|
|
||||||
if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
|
if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
|
||||||
attrBufferSize = gr_ctx->betacb_ctxsw_buffer.size;
|
attrBufferSize = U32(gr_ctx->betacb_ctxsw_buffer.size);
|
||||||
} else {
|
} else {
|
||||||
attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
|
attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -88,7 +88,7 @@ int gr_gp10b_add_zbc_color(struct gk20a *g, struct gr_gk20a *gr,
|
|||||||
int gr_gp10b_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr,
|
int gr_gp10b_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr,
|
||||||
struct zbc_entry *depth_val, u32 index);
|
struct zbc_entry *depth_val, u32 index);
|
||||||
u32 gr_gp10b_pagepool_default_size(struct gk20a *g);
|
u32 gr_gp10b_pagepool_default_size(struct gk20a *g);
|
||||||
int gr_gp10b_calc_global_ctx_buffer_size(struct gk20a *g);
|
u32 gr_gp10b_calc_global_ctx_buffer_size(struct gk20a *g);
|
||||||
void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data);
|
void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data);
|
||||||
void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data);
|
void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data);
|
||||||
int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr,
|
int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr,
|
||||||
|
|||||||
@@ -1164,10 +1164,10 @@ u32 gr_gv11b_pagepool_default_size(struct gk20a *g)
|
|||||||
return gr_scc_pagepool_total_pages_hwmax_value_v();
|
return gr_scc_pagepool_total_pages_hwmax_value_v();
|
||||||
}
|
}
|
||||||
|
|
||||||
int gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g)
|
u32 gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct gr_gk20a *gr = &g->gr;
|
struct gr_gk20a *gr = &g->gr;
|
||||||
int size;
|
u32 size;
|
||||||
|
|
||||||
gr->attrib_cb_size = gr->attrib_cb_default_size;
|
gr->attrib_cb_size = gr->attrib_cb_default_size;
|
||||||
gr->alpha_cb_size = gr->alpha_cb_default_size;
|
gr->alpha_cb_size = gr->alpha_cb_default_size;
|
||||||
@@ -2070,10 +2070,10 @@ void gr_gv11b_commit_global_attrib_cb(struct gk20a *g,
|
|||||||
struct nvgpu_gr_ctx *gr_ctx,
|
struct nvgpu_gr_ctx *gr_ctx,
|
||||||
u64 addr, bool patch)
|
u64 addr, bool patch)
|
||||||
{
|
{
|
||||||
int attrBufferSize;
|
u32 attrBufferSize;
|
||||||
|
|
||||||
if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
|
if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
|
||||||
attrBufferSize = gr_ctx->betacb_ctxsw_buffer.size;
|
attrBufferSize = U32(gr_ctx->betacb_ctxsw_buffer.size);
|
||||||
} else {
|
} else {
|
||||||
attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
|
attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -121,7 +121,7 @@ int gr_gv11b_load_stencil_default_tbl(struct gk20a *g,
|
|||||||
struct gr_gk20a *gr);
|
struct gr_gk20a *gr);
|
||||||
int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr);
|
int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr);
|
||||||
u32 gr_gv11b_pagepool_default_size(struct gk20a *g);
|
u32 gr_gv11b_pagepool_default_size(struct gk20a *g);
|
||||||
int gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g);
|
u32 gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g);
|
||||||
int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
|
int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
|
||||||
u32 class_num, u32 offset, u32 data);
|
u32 class_num, u32 offset, u32 data);
|
||||||
void gr_gv11b_bundle_cb_defaults(struct gk20a *g);
|
void gr_gv11b_bundle_cb_defaults(struct gk20a *g);
|
||||||
|
|||||||
@@ -234,7 +234,7 @@ struct gpu_ops {
|
|||||||
void (*access_smpc_reg)(struct gk20a *g, u32 quad, u32 offset);
|
void (*access_smpc_reg)(struct gk20a *g, u32 quad, u32 offset);
|
||||||
void (*bundle_cb_defaults)(struct gk20a *g);
|
void (*bundle_cb_defaults)(struct gk20a *g);
|
||||||
void (*cb_size_default)(struct gk20a *g);
|
void (*cb_size_default)(struct gk20a *g);
|
||||||
int (*calc_global_ctx_buffer_size)(struct gk20a *g);
|
u32 (*calc_global_ctx_buffer_size)(struct gk20a *g);
|
||||||
void (*commit_global_attrib_cb)(struct gk20a *g,
|
void (*commit_global_attrib_cb)(struct gk20a *g,
|
||||||
struct nvgpu_gr_ctx *ch_ctx,
|
struct nvgpu_gr_ctx *ch_ctx,
|
||||||
u64 addr, bool patch);
|
u64 addr, bool patch);
|
||||||
@@ -976,7 +976,7 @@ struct gpu_ops {
|
|||||||
void (*l2_flush)(struct gk20a *g, bool invalidate);
|
void (*l2_flush)(struct gk20a *g, bool invalidate);
|
||||||
void (*cbc_clean)(struct gk20a *g);
|
void (*cbc_clean)(struct gk20a *g);
|
||||||
void (*set_big_page_size)(struct gk20a *g,
|
void (*set_big_page_size)(struct gk20a *g,
|
||||||
struct nvgpu_mem *mem, int size);
|
struct nvgpu_mem *mem, u32 size);
|
||||||
u32 (*get_big_page_sizes)(void);
|
u32 (*get_big_page_sizes)(void);
|
||||||
u32 (*get_default_big_page_size)(void);
|
u32 (*get_default_big_page_size)(void);
|
||||||
u32 (*get_iommu_bit)(struct gk20a *g);
|
u32 (*get_iommu_bit)(struct gk20a *g);
|
||||||
|
|||||||
@@ -133,7 +133,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
|
|||||||
static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
|
static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct gr_gk20a *gr = &g->gr;
|
struct gr_gk20a *gr = &g->gr;
|
||||||
int attr_buffer_size;
|
u32 attr_buffer_size;
|
||||||
|
|
||||||
u32 cb_buffer_size = gr->bundle_cb_default_size *
|
u32 cb_buffer_size = gr->bundle_cb_default_size *
|
||||||
gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
|
gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
|
||||||
@@ -151,7 +151,7 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
|
|||||||
nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
|
nvgpu_log_info(g, "pagepool_buffer_size : %d", pagepool_buffer_size);
|
||||||
gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
|
gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
|
||||||
|
|
||||||
nvgpu_log_info(g, "attr_buffer_size : %d", attr_buffer_size);
|
nvgpu_log_info(g, "attr_buffer_size : %u", attr_buffer_size);
|
||||||
gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
|
gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
|
||||||
|
|
||||||
nvgpu_log_info(g, "priv access map size : %d",
|
nvgpu_log_info(g, "priv access map size : %d",
|
||||||
|
|||||||
Reference in New Issue
Block a user