From 5f4de54535b27cd3b3f5bcb95287fbd57f5039fe Mon Sep 17 00:00:00 2001
From: Seshendra Gadagottu
Date: Mon, 22 Jul 2019 14:47:21 -0700
Subject: [PATCH] gpu: nvgpu: move non-secure boot related HAL to non-FuSa file

Moved the non-secure GR falcon boot-related code to the non-functional
safety (non-FuSa) file. Also added the HAL initialization for these
functions under the CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT flag.

JIRA NVGPU-3741

Change-Id: I72fb92c04dc6e76c338e9a0e0cd86b12109ce284
Signed-off-by: Seshendra Gadagottu
Reviewed-on: https://git-master.nvidia.com/r/2158936
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 .../gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b.c | 145 ++++++++++++++++++
 .../hal/gr/falcon/gr_falcon_gm20b_fusa.c      | 141 -----------------
 drivers/gpu/nvgpu/hal/init/hal_gm20b.c        |  10 +-
 drivers/gpu/nvgpu/hal/init/hal_gp10b.c        |  10 +-
 drivers/gpu/nvgpu/hal/init/hal_gv11b.c        |  10 +-
 drivers/gpu/nvgpu/hal/init/hal_tu104.c        |   4 +-
 6 files changed, 163 insertions(+), 157 deletions(-)

diff --git a/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b.c b/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b.c
index 84aac729e..386592e67 100644
--- a/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b.c
+++ b/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b.c
@@ -47,6 +47,151 @@
 #define CTXSW_INTR0 BIT32(0)
 #define CTXSW_INTR1 BIT32(1)
 
+#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT
+void gm20b_gr_falcon_load_gpccs_dmem(struct gk20a *g,
+			const u32 *ucode_u32_data, u32 ucode_u32_size)
+{
+	u32 i;
+	u32 checksum = 0;
+
+	/* enable access for gpccs dmem */
+	nvgpu_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) |
+					gr_gpccs_dmemc_blk_f(0) |
+					gr_gpccs_dmemc_aincw_f(1)));
+
+	for (i = 0; i < ucode_u32_size; i++) {
+		nvgpu_writel(g, gr_gpccs_dmemd_r(0), ucode_u32_data[i]);
+		checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]);
+	}
+	nvgpu_log_info(g, "gpccs dmem checksum: 0x%x", checksum);
+}
+
+void gm20b_gr_falcon_load_fecs_dmem(struct gk20a *g,
+			const u32 *ucode_u32_data, u32 ucode_u32_size)
+{
+	u32 i;
+	u32 checksum = 0;
+
+	/* set access for fecs dmem */
+	nvgpu_writel(g, gr_fecs_dmemc_r(0), (gr_fecs_dmemc_offs_f(0) |
+					gr_fecs_dmemc_blk_f(0) |
+					gr_fecs_dmemc_aincw_f(1)));
+
+	for (i = 0; i < ucode_u32_size; i++) {
+		nvgpu_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]);
+		checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]);
+	}
+	nvgpu_log_info(g, "fecs dmem checksum: 0x%x", checksum);
+}
+
+void gm20b_gr_falcon_load_gpccs_imem(struct gk20a *g,
+			const u32 *ucode_u32_data, u32 ucode_u32_size)
+{
+	u32 cfg, gpccs_imem_size;
+	u32 tag, i, pad_start, pad_end;
+	u32 checksum = 0;
+
+	/* enable access for gpccs imem */
+	nvgpu_writel(g, gr_gpccs_imemc_r(0), (gr_gpccs_imemc_offs_f(0) |
+					gr_gpccs_imemc_blk_f(0) |
+					gr_gpccs_imemc_aincw_f(1)));
+
+	cfg = nvgpu_readl(g, gr_gpc0_cfg_r());
+	gpccs_imem_size = gr_gpc0_cfg_imem_sz_v(cfg);
+
+	/* Setup the tags for the instruction memory. 
*/ + tag = 0; + nvgpu_writel(g, gr_gpccs_imemt_r(0), gr_gpccs_imemt_tag_f(tag)); + + for (i = 0; i < ucode_u32_size; i++) { + if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) { + tag = nvgpu_safe_add_u32(tag, 1U); + nvgpu_writel(g, gr_gpccs_imemt_r(0), + gr_gpccs_imemt_tag_f(tag)); + } + nvgpu_writel(g, gr_gpccs_imemd_r(0), ucode_u32_data[i]); + checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]); + } + + pad_start = nvgpu_safe_mult_u32(i, 4U); + pad_end = nvgpu_safe_add_u32(pad_start, nvgpu_safe_add_u32( + nvgpu_safe_sub_u32(256U, (pad_start % 256U)), 256U)); + for (i = pad_start; + (i < nvgpu_safe_mult_u32(gpccs_imem_size, 256U)) && + (i < pad_end); i += 4U) { + if ((i != 0U) && ((i % 256U) == 0U)) { + tag = nvgpu_safe_add_u32(tag, 1U); + nvgpu_writel(g, gr_gpccs_imemt_r(0), + gr_gpccs_imemt_tag_f(tag)); + } + nvgpu_writel(g, gr_gpccs_imemd_r(0), 0); + } + + nvgpu_log_info(g, "gpccs imem checksum: 0x%x", checksum); +} + +void gm20b_gr_falcon_load_fecs_imem(struct gk20a *g, + const u32 *ucode_u32_data, u32 ucode_u32_size) +{ + u32 cfg, fecs_imem_size; + u32 tag, i, pad_start, pad_end; + u32 checksum = 0; + + /* set access for fecs imem */ + nvgpu_writel(g, gr_fecs_imemc_r(0), (gr_fecs_imemc_offs_f(0) | + gr_fecs_imemc_blk_f(0) | + gr_fecs_imemc_aincw_f(1))); + + cfg = nvgpu_readl(g, gr_fecs_cfg_r()); + fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); + + /* Setup the tags for the instruction memory. */ + tag = 0; + nvgpu_writel(g, gr_fecs_imemt_r(0), gr_fecs_imemt_tag_f(tag)); + + for (i = 0; i < ucode_u32_size; i++) { + if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) { + tag = nvgpu_safe_add_u32(tag, 1U); + nvgpu_writel(g, gr_fecs_imemt_r(0), + gr_fecs_imemt_tag_f(tag)); + } + nvgpu_writel(g, gr_fecs_imemd_r(0), ucode_u32_data[i]); + checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]); + } + + pad_start = nvgpu_safe_mult_u32(i, 4U); + pad_end = nvgpu_safe_add_u32(pad_start, nvgpu_safe_add_u32( + nvgpu_safe_sub_u32(256U, (pad_start % 256U)), 256U)); + for (i = pad_start; + (i < nvgpu_safe_mult_u32(fecs_imem_size, 256U)) && i < pad_end; + i += 4U) { + if ((i != 0U) && ((i % 256U) == 0U)) { + tag = nvgpu_safe_add_u32(tag, 1U); + nvgpu_writel(g, gr_fecs_imemt_r(0), + gr_fecs_imemt_tag_f(tag)); + } + nvgpu_writel(g, gr_fecs_imemd_r(0), 0); + } + nvgpu_log_info(g, "fecs imem checksum: 0x%x", checksum); +} + +void gm20b_gr_falcon_start_ucode(struct gk20a *g) +{ + nvgpu_log_fn(g, " "); + + nvgpu_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0U), + gr_fecs_ctxsw_mailbox_clear_value_f(~U32(0U))); + + nvgpu_writel(g, gr_gpccs_dmactl_r(), gr_gpccs_dmactl_require_ctx_f(0U)); + nvgpu_writel(g, gr_fecs_dmactl_r(), gr_fecs_dmactl_require_ctx_f(0U)); + + nvgpu_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1U)); + nvgpu_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1U)); + + nvgpu_log_fn(g, "done"); +} +#endif + #ifdef CONFIG_NVGPU_SIM void gm20b_gr_falcon_configure_fmodel(struct gk20a *g) { diff --git a/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b_fusa.c b/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b_fusa.c index 3cd1f95f9..109decf6f 100644 --- a/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b_fusa.c +++ b/drivers/gpu/nvgpu/hal/gr/falcon/gr_falcon_gm20b_fusa.c @@ -47,153 +47,12 @@ #define CTXSW_INTR0 BIT32(0) #define CTXSW_INTR1 BIT32(1) -void gm20b_gr_falcon_load_gpccs_dmem(struct gk20a *g, - const u32 *ucode_u32_data, u32 ucode_u32_size) -{ - u32 i; - u32 checksum = 0; - - /* enable access for gpccs dmem */ - nvgpu_writel(g, gr_gpccs_dmemc_r(0), 
(gr_gpccs_dmemc_offs_f(0) | - gr_gpccs_dmemc_blk_f(0) | - gr_gpccs_dmemc_aincw_f(1))); - - for (i = 0; i < ucode_u32_size; i++) { - nvgpu_writel(g, gr_gpccs_dmemd_r(0), ucode_u32_data[i]); - checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]); - } - nvgpu_log_info(g, "gpccs dmem checksum: 0x%x", checksum); -} - -void gm20b_gr_falcon_load_fecs_dmem(struct gk20a *g, - const u32 *ucode_u32_data, u32 ucode_u32_size) -{ - u32 i; - u32 checksum = 0; - - /* set access for fecs dmem */ - nvgpu_writel(g, gr_fecs_dmemc_r(0), (gr_fecs_dmemc_offs_f(0) | - gr_fecs_dmemc_blk_f(0) | - gr_fecs_dmemc_aincw_f(1))); - - for (i = 0; i < ucode_u32_size; i++) { - nvgpu_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]); - checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]); - } - nvgpu_log_info(g, "fecs dmem checksum: 0x%x", checksum); -} - -void gm20b_gr_falcon_load_gpccs_imem(struct gk20a *g, - const u32 *ucode_u32_data, u32 ucode_u32_size) -{ - u32 cfg, gpccs_imem_size; - u32 tag, i, pad_start, pad_end; - u32 checksum = 0; - - /* enable access for gpccs imem */ - nvgpu_writel(g, gr_gpccs_imemc_r(0), (gr_gpccs_imemc_offs_f(0) | - gr_gpccs_imemc_blk_f(0) | - gr_gpccs_imemc_aincw_f(1))); - - cfg = nvgpu_readl(g, gr_gpc0_cfg_r()); - gpccs_imem_size = gr_gpc0_cfg_imem_sz_v(cfg); - - /* Setup the tags for the instruction memory. */ - tag = 0; - nvgpu_writel(g, gr_gpccs_imemt_r(0), gr_gpccs_imemt_tag_f(tag)); - - for (i = 0; i < ucode_u32_size; i++) { - if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) { - tag = nvgpu_safe_add_u32(tag, 1U); - nvgpu_writel(g, gr_gpccs_imemt_r(0), - gr_gpccs_imemt_tag_f(tag)); - } - nvgpu_writel(g, gr_gpccs_imemd_r(0), ucode_u32_data[i]); - checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]); - } - - pad_start = nvgpu_safe_mult_u32(i, 4U); - pad_end = nvgpu_safe_add_u32(pad_start, nvgpu_safe_add_u32( - nvgpu_safe_sub_u32(256U, (pad_start % 256U)), 256U)); - for (i = pad_start; - (i < nvgpu_safe_mult_u32(gpccs_imem_size, 256U)) && - (i < pad_end); i += 4U) { - if ((i != 0U) && ((i % 256U) == 0U)) { - tag = nvgpu_safe_add_u32(tag, 1U); - nvgpu_writel(g, gr_gpccs_imemt_r(0), - gr_gpccs_imemt_tag_f(tag)); - } - nvgpu_writel(g, gr_gpccs_imemd_r(0), 0); - } - - nvgpu_log_info(g, "gpccs imem checksum: 0x%x", checksum); -} - -void gm20b_gr_falcon_load_fecs_imem(struct gk20a *g, - const u32 *ucode_u32_data, u32 ucode_u32_size) -{ - u32 cfg, fecs_imem_size; - u32 tag, i, pad_start, pad_end; - u32 checksum = 0; - - /* set access for fecs imem */ - nvgpu_writel(g, gr_fecs_imemc_r(0), (gr_fecs_imemc_offs_f(0) | - gr_fecs_imemc_blk_f(0) | - gr_fecs_imemc_aincw_f(1))); - - cfg = nvgpu_readl(g, gr_fecs_cfg_r()); - fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg); - - /* Setup the tags for the instruction memory. 
*/ - tag = 0; - nvgpu_writel(g, gr_fecs_imemt_r(0), gr_fecs_imemt_tag_f(tag)); - - for (i = 0; i < ucode_u32_size; i++) { - if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) { - tag = nvgpu_safe_add_u32(tag, 1U); - nvgpu_writel(g, gr_fecs_imemt_r(0), - gr_fecs_imemt_tag_f(tag)); - } - nvgpu_writel(g, gr_fecs_imemd_r(0), ucode_u32_data[i]); - checksum = nvgpu_gr_checksum_u32(checksum, ucode_u32_data[i]); - } - - pad_start = nvgpu_safe_mult_u32(i, 4U); - pad_end = nvgpu_safe_add_u32(pad_start, nvgpu_safe_add_u32( - nvgpu_safe_sub_u32(256U, (pad_start % 256U)), 256U)); - for (i = pad_start; - (i < nvgpu_safe_mult_u32(fecs_imem_size, 256U)) && i < pad_end; - i += 4U) { - if ((i != 0U) && ((i % 256U) == 0U)) { - tag = nvgpu_safe_add_u32(tag, 1U); - nvgpu_writel(g, gr_fecs_imemt_r(0), - gr_fecs_imemt_tag_f(tag)); - } - nvgpu_writel(g, gr_fecs_imemd_r(0), 0); - } - nvgpu_log_info(g, "fecs imem checksum: 0x%x", checksum); -} u32 gm20b_gr_falcon_get_gpccs_start_reg_offset(void) { return (gr_gpcs_gpccs_falcon_hwcfg_r() - gr_fecs_falcon_hwcfg_r()); } -void gm20b_gr_falcon_start_ucode(struct gk20a *g) -{ - nvgpu_log_fn(g, " "); - - nvgpu_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0U), - gr_fecs_ctxsw_mailbox_clear_value_f(~U32(0U))); - - nvgpu_writel(g, gr_gpccs_dmactl_r(), gr_gpccs_dmactl_require_ctx_f(0U)); - nvgpu_writel(g, gr_fecs_dmactl_r(), gr_fecs_dmactl_require_ctx_f(0U)); - - nvgpu_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1U)); - nvgpu_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1U)); - - nvgpu_log_fn(g, "done"); -} void gm20b_gr_falcon_start_gpccs(struct gk20a *g) { diff --git a/drivers/gpu/nvgpu/hal/init/hal_gm20b.c b/drivers/gpu/nvgpu/hal/init/hal_gm20b.c index de7dc039d..5cbe6111a 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gm20b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gm20b.c @@ -477,14 +477,18 @@ static const struct gpu_ops gm20b_ops = { gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size, .get_fecs_ctx_state_store_major_rev_id = gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id, +#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT .load_gpccs_dmem = gm20b_gr_falcon_load_gpccs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, + .start_ucode = gm20b_gr_falcon_start_ucode, + .load_ctxsw_ucode = + nvgpu_gr_falcon_load_ctxsw_ucode, +#endif #ifdef CONFIG_NVGPU_SIM .configure_fmodel = gm20b_gr_falcon_configure_fmodel, #endif - .start_ucode = gm20b_gr_falcon_start_ucode, .start_gpccs = gm20b_gr_falcon_start_gpccs, .start_fecs = gm20b_gr_falcon_start_fecs, .get_gpccs_start_reg_offset = @@ -494,10 +498,6 @@ static const struct gpu_ops gm20b_ops = { gm20b_gr_falcon_load_ctxsw_ucode_header, .load_ctxsw_ucode_boot = gm20b_gr_falcon_load_ctxsw_ucode_boot, -#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT - .load_ctxsw_ucode = - nvgpu_gr_falcon_load_ctxsw_ucode, -#endif .wait_mem_scrubbing = gm20b_gr_falcon_wait_mem_scrubbing, .wait_ctxsw_ready = gm20b_gr_falcon_wait_ctxsw_ready, diff --git a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c index 9d553a914..611869199 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gp10b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gp10b.c @@ -544,14 +544,18 @@ static const struct gpu_ops gp10b_ops = { gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size, .get_fecs_ctx_state_store_major_rev_id = gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id, +#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT .load_gpccs_dmem = 
gm20b_gr_falcon_load_gpccs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, + .start_ucode = gm20b_gr_falcon_start_ucode, + .load_ctxsw_ucode = + nvgpu_gr_falcon_load_ctxsw_ucode, +#endif #ifdef CONFIG_NVGPU_SIM .configure_fmodel = gm20b_gr_falcon_configure_fmodel, #endif - .start_ucode = gm20b_gr_falcon_start_ucode, .start_gpccs = gm20b_gr_falcon_start_gpccs, .start_fecs = gm20b_gr_falcon_start_fecs, .get_gpccs_start_reg_offset = @@ -561,10 +565,6 @@ static const struct gpu_ops gp10b_ops = { gm20b_gr_falcon_load_ctxsw_ucode_header, .load_ctxsw_ucode_boot = gm20b_gr_falcon_load_ctxsw_ucode_boot, -#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT - .load_ctxsw_ucode = - nvgpu_gr_falcon_load_ctxsw_ucode, -#endif .wait_mem_scrubbing = gm20b_gr_falcon_wait_mem_scrubbing, .wait_ctxsw_ready = gm20b_gr_falcon_wait_ctxsw_ready, diff --git a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c index f99e1c5bb..99f24f2fa 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_gv11b.c +++ b/drivers/gpu/nvgpu/hal/init/hal_gv11b.c @@ -663,14 +663,18 @@ static const struct gpu_ops gv11b_ops = { gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size, .get_fecs_ctx_state_store_major_rev_id = gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id, +#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT .load_gpccs_dmem = gm20b_gr_falcon_load_gpccs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, + .start_ucode = gm20b_gr_falcon_start_ucode, + .load_ctxsw_ucode = + nvgpu_gr_falcon_load_ctxsw_ucode, +#endif #ifdef CONFIG_NVGPU_SIM .configure_fmodel = gm20b_gr_falcon_configure_fmodel, #endif - .start_ucode = gm20b_gr_falcon_start_ucode, .start_gpccs = gm20b_gr_falcon_start_gpccs, .start_fecs = gm20b_gr_falcon_start_fecs, .get_gpccs_start_reg_offset = @@ -680,10 +684,6 @@ static const struct gpu_ops gv11b_ops = { gm20b_gr_falcon_load_ctxsw_ucode_header, .load_ctxsw_ucode_boot = gm20b_gr_falcon_load_ctxsw_ucode_boot, -#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT - .load_ctxsw_ucode = - nvgpu_gr_falcon_load_ctxsw_ucode, -#endif .wait_mem_scrubbing = gm20b_gr_falcon_wait_mem_scrubbing, .wait_ctxsw_ready = gm20b_gr_falcon_wait_ctxsw_ready, diff --git a/drivers/gpu/nvgpu/hal/init/hal_tu104.c b/drivers/gpu/nvgpu/hal/init/hal_tu104.c index 2b8d63c45..31bb6bc13 100644 --- a/drivers/gpu/nvgpu/hal/init/hal_tu104.c +++ b/drivers/gpu/nvgpu/hal/init/hal_tu104.c @@ -688,14 +688,16 @@ static const struct gpu_ops tu104_ops = { gm20b_gr_falcon_get_fecs_ctxsw_mailbox_size, .get_fecs_ctx_state_store_major_rev_id = gm20b_gr_falcon_get_fecs_ctx_state_store_major_rev_id, +#ifdef CONFIG_NVGPU_GR_FALCON_NON_SECURE_BOOT .load_gpccs_dmem = gm20b_gr_falcon_load_gpccs_dmem, .load_fecs_dmem = gm20b_gr_falcon_load_fecs_dmem, .load_gpccs_imem = gm20b_gr_falcon_load_gpccs_imem, .load_fecs_imem = gm20b_gr_falcon_load_fecs_imem, + .start_ucode = gm20b_gr_falcon_start_ucode, +#endif #ifdef CONFIG_NVGPU_SIM .configure_fmodel = gm20b_gr_falcon_configure_fmodel, #endif - .start_ucode = gm20b_gr_falcon_start_ucode, .start_gpccs = gm20b_gr_falcon_start_gpccs, .start_fecs = gm20b_gr_falcon_start_fecs, .get_gpccs_start_reg_offset =