mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-23 18:16:01 +03:00
Created a class unit under hal and moved all valid-class-check functionality to this unit. Moved all class defines from gr to a new header, include/nvgpu/class.h.

Moved the following HALs from gr to the newly created class unit:
bool (*is_valid_class)(struct gk20a *g, u32 class_num);         --> bool (*is_valid)(u32 class_num);
bool (*is_valid_gfx_class)(struct gk20a *g, u32 class_num);     --> bool (*is_valid_gfx)(u32 class_num);
bool (*is_valid_compute_class)(struct gk20a *g, u32 class_num); --> bool (*is_valid_compute)(u32 class_num);

JIRA NVGPU-3109

Change-Id: I01123e9b984613d4bddb2d8cf23d63410e212408
Signed-off-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2095542
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
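As a rough sketch of what this refactor produces (the struct and field names below are assumptions, not taken from the tree; only the three function-pointer signatures come from the commit message above), the relocated class HAL might be grouped like this:

struct gops_class {
	/* validate any supported class number */
	bool (*is_valid)(u32 class_num);
	/* validate a graphics class number */
	bool (*is_valid_gfx)(u32 class_num);
	/* validate a compute class number */
	bool (*is_valid_compute)(u32 class_num);
};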
202 lines
5.7 KiB
C
/*
 * GP106 GPU GR
 *
 * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/class.h>
#include <nvgpu/dma.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/gr/ctx.h>
#include <nvgpu/gr/config.h>

#include "gk20a/gr_gk20a.h"
#include "gm20b/gr_gm20b.h"
#include "gp10b/gr_gp10b.h"

#include <nvgpu/io.h>

#include "gr_gp106.h"

#include <nvgpu/hw/gp106/hw_gr_gp106.h>

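/* Default GR page pool size for GP106, in pages: the HW maximum reported
 * by gr_scc_pagepool_total_pages_hwmax_value_v(). */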
u32 gr_gp106_pagepool_default_size(struct gk20a *g)
{
	return gr_scc_pagepool_total_pages_hwmax_value_v();
}

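/* SW method helper: program the FE go-idle timeout register with the
 * method data. */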
static void gr_gp106_set_go_idle_timeout(struct gk20a *g, u32 data)
{
	gk20a_writel(g, gr_fe_go_idle_timeout_r(), data);
}

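/*
 * Handle SW methods for the Pascal graphics (PASCAL_B) and compute
 * (PASCAL_COMPUTE_B) classes. The method offset is a 32-bit word index,
 * so it is shifted left by 2 before being compared against the
 * byte-addressed NVC097_ and NVC0C0_ method defines. Unrecognized
 * methods fail with -EINVAL.
 */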
int gr_gp106_handle_sw_method(struct gk20a *g, u32 addr,
			      u32 class_num, u32 offset, u32 data)
{
	nvgpu_log_fn(g, " ");

	if (class_num == PASCAL_COMPUTE_B) {
		switch (offset << 2) {
		case NVC0C0_SET_SHADER_EXCEPTIONS:
			gk20a_gr_set_shader_exceptions(g, data);
			break;
		case NVC0C0_SET_RD_COALESCE:
			g->ops.gr.init.lg_coalesce(g, data);
			break;
		default:
			goto fail;
		}
	}

	if (class_num == PASCAL_B) {
		switch (offset << 2) {
		case NVC097_SET_SHADER_EXCEPTIONS:
			gk20a_gr_set_shader_exceptions(g, data);
			break;
		case NVC097_SET_CIRCULAR_BUFFER_SIZE:
			g->ops.gr.set_circular_buffer_size(g, data);
			break;
		case NVC097_SET_ALPHA_CIRCULAR_BUFFER_SIZE:
			g->ops.gr.set_alpha_circular_buffer_size(g, data);
			break;
		case NVC097_SET_GO_IDLE_TIMEOUT:
			gr_gp106_set_go_idle_timeout(g, data);
			break;
		case NVC097_SET_RD_COALESCE:
			g->ops.gr.init.lg_coalesce(g, data);
			break;
		case NVC097_SET_BES_CROP_DEBUG3:
			g->ops.gr.set_bes_crop_debug3(g, data);
			break;
		case NVC097_SET_BES_CROP_DEBUG4:
			g->ops.gr.set_bes_crop_debug4(g, data);
			break;
		default:
			goto fail;
		}
	}
	return 0;

fail:
	return -EINVAL;
}

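/*
 * Set graphics/compute context-switch preemption modes for a GR context.
 * Force flags in gr.ctx_vars can override the requested modes (GFXP for
 * graphics, CILP for compute). Requesting no mode at all, or GFXP together
 * with CILP, is rejected. For GFXP the preempt, spill, beta CB and page
 * pool buffers are sized and allocated before the mode is recorded in the
 * context.
 */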
int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
				struct nvgpu_gr_ctx *gr_ctx,
				struct vm_gk20a *vm, u32 class,
				u32 graphics_preempt_mode,
				u32 compute_preempt_mode)
{
	int err = 0;

	if (class == PASCAL_B && g->gr.ctx_vars.force_preemption_gfxp) {
		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
	}

	if (class == PASCAL_COMPUTE_B &&
			g->gr.ctx_vars.force_preemption_cilp) {
		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
	}

	/* check for invalid combinations */
	if ((graphics_preempt_mode == 0U) && (compute_preempt_mode == 0U)) {
		return -EINVAL;
	}

	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
	    (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP)) {
		return -EINVAL;
	}

	/* set preemption modes */
	switch (graphics_preempt_mode) {
	case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
	{
		u32 spill_size =
			gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
			gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
		u32 pagepool_size = g->ops.gr.init.pagepool_default_size(g) *
			gr_scc_pagepool_total_pages_byte_granularity_v();
		u32 betacb_size = g->ops.gr.init.get_attrib_cb_default_size(g) +
			(gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
			 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
		u32 alpha_cb_size = g->ops.gr.init.get_alpha_cb_size(g,
			nvgpu_gr_config_get_tpc_count(g->gr.config));
		u32 attrib_cb_size = (betacb_size + alpha_cb_size) *
			gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
			nvgpu_gr_config_get_max_tpc_count(g->gr.config);
		attrib_cb_size = ALIGN(attrib_cb_size, 128);

		nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size);
		nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size);
		nvgpu_log_info(g, "gfxp context attrib_cb_size=%d",
			attrib_cb_size);

		nvgpu_gr_ctx_set_size(g->gr.gr_ctx_desc,
			NVGPU_GR_CTX_PREEMPT_CTXSW,
			g->gr.ctx_vars.preempt_image_size);
		nvgpu_gr_ctx_set_size(g->gr.gr_ctx_desc,
			NVGPU_GR_CTX_SPILL_CTXSW, spill_size);
		nvgpu_gr_ctx_set_size(g->gr.gr_ctx_desc,
			NVGPU_GR_CTX_BETACB_CTXSW, attrib_cb_size);
		nvgpu_gr_ctx_set_size(g->gr.gr_ctx_desc,
			NVGPU_GR_CTX_PAGEPOOL_CTXSW, pagepool_size);

		err = nvgpu_gr_ctx_alloc_ctxsw_buffers(g, gr_ctx,
			g->gr.gr_ctx_desc, vm);
		if (err != 0) {
			nvgpu_err(g, "cannot allocate ctxsw buffers");
			goto fail;
		}

		nvgpu_gr_ctx_init_graphics_preemption_mode(gr_ctx,
			graphics_preempt_mode);
		break;
	}

	case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
		nvgpu_gr_ctx_init_graphics_preemption_mode(gr_ctx,
			graphics_preempt_mode);
		break;

	default:
		break;
	}

	if (class == PASCAL_COMPUTE_B) {
		switch (compute_preempt_mode) {
		case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
		case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
		case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
			nvgpu_gr_ctx_init_compute_preemption_mode(gr_ctx,
				compute_preempt_mode);
			break;
		default:
			break;
		}
	}

	return 0;

fail:
	return err;
}