gpu: nvgpu: update common.gr doxygen

Add the below updates to common.gr doxygen:

- Add doxygen comments for APIs that are mentioned in the RM SWAD and
  in the RM-common.gr traceability document.
- Add comments about the valid ranges of input parameters for a number
  of functions.
- Add nvgpu_assert() to ensure that a correct value is passed as the
  input parameter to a number of functions (see the sketch after this
  list).
- Add references to relevant functions with @see.
- Update the Targets field of unit tests to cover the newly
  doxygenated functions.
- Update unit test test_gr_init_hal_pd_skip_table_gpc to account for
  the new asserts added to some APIs.
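For reference, the bounds-assert pattern applied throughout this change
looks like the following minimal sketch. The function, the assert, and
the accessors are taken from the hunks below; only the standalone
presentation here is illustrative.

/*
 * Sketch of the pattern: validate the index against the configured
 * count before using it to index the backing array.
 */
u32 nvgpu_gr_config_get_gpc_tpc_mask(struct nvgpu_gr_config *config,
	u32 gpc_index)
{
	/* Out-of-range indices trip the assert instead of reading OOB. */
	nvgpu_assert(gpc_index < nvgpu_gr_config_get_gpc_count(config));
	return config->gpc_tpc_mask[gpc_index];
}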

Jira NVGPU-6180

Change-Id: Ie889bed96b6428b1fd86dcf30b322944464e9d12
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2469397
(cherry picked from commit 5d7d7e9ce1c4efe836ab842d7962a3aee4e8972f)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2469394
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Deepak Nibade
Date: 2021-01-04 17:12:50 +05:30
Committed by: mobile promotions
Parent: 27b321e1a9
Commit: bb43f11a61
17 changed files with 208 additions and 59 deletions

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -52,6 +52,7 @@ void nvgpu_gr_ctx_desc_free(struct gk20a *g,
 
 void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
 	u32 index, u32 size)
 {
+	nvgpu_assert(index < NVGPU_GR_CTX_COUNT);
 	gr_ctx_desc->size[index] = size;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -74,6 +74,7 @@ void nvgpu_gr_global_ctx_desc_free(struct gk20a *g,
 
 void nvgpu_gr_global_ctx_set_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
 	u32 index, size_t size)
 {
+	nvgpu_assert(index < NVGPU_GR_GLOBAL_CTX_COUNT);
 	desc[index].size = size;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -145,6 +145,8 @@ u32 nvgpu_gr_gpc_offset(struct gk20a *g, u32 gpc)
 {
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 gpc_offset = nvgpu_safe_mult_u32(gpc_stride, gpc);
 
+	nvgpu_assert(gpc <
+		nvgpu_gr_config_get_gpc_count(nvgpu_gr_get_config_ptr(g)));
 	return gpc_offset;
 }
@@ -154,6 +156,8 @@ u32 nvgpu_gr_tpc_offset(struct gk20a *g, u32 tpc)
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
 		GPU_LIT_TPC_IN_GPC_STRIDE);
 	u32 tpc_offset = nvgpu_safe_mult_u32(tpc_in_gpc_stride, tpc);
 
+	nvgpu_assert(tpc <
+		nvgpu_gr_config_get_max_tpc_per_gpc_count(nvgpu_gr_get_config_ptr(g)));
 	return tpc_offset;
 }
@@ -162,6 +166,8 @@ u32 nvgpu_gr_sm_offset(struct gk20a *g, u32 sm)
 {
 	u32 sm_pri_stride = nvgpu_get_litter_value(g, GPU_LIT_SM_PRI_STRIDE);
 	u32 sm_offset = nvgpu_safe_mult_u32(sm_pri_stride, sm);
 
+	nvgpu_assert(sm <
+		nvgpu_gr_config_get_sm_count_per_tpc(nvgpu_gr_get_config_ptr(g)));
 	return sm_offset;
 }
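These three helpers compose: a unicast register address for a specific
SM is typically formed by adding the GPC, TPC, and SM offsets to a
broadcast register offset. A hedged sketch follows; the wrapper name
and the "reg" parameter are illustrative, not part of this change.

/*
 * Illustrative only: stack the offset helpers to address a per-SM
 * register. Each helper asserts its index is within the configured
 * range, per the asserts added above.
 */
static u32 sm_reg_addr_sketch(struct gk20a *g, u32 reg,
	u32 gpc, u32 tpc, u32 sm)
{
	return reg + nvgpu_gr_gpc_offset(g, gpc) +
		nvgpu_gr_tpc_offset(g, tpc) +
		nvgpu_gr_sm_offset(g, sm);
}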

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -727,6 +727,7 @@ u32 nvgpu_gr_config_get_sm_count_per_tpc(struct nvgpu_gr_config *config)
 u32 nvgpu_gr_config_get_gpc_ppc_count(struct nvgpu_gr_config *config,
 	u32 gpc_index)
 {
+	nvgpu_assert(gpc_index < nvgpu_gr_config_get_gpc_count(config));
 	return config->gpc_ppc_count[gpc_index];
 }
@@ -747,6 +748,8 @@ u32 nvgpu_gr_config_get_gpc_tpc_count(struct nvgpu_gr_config *config,
 u32 nvgpu_gr_config_get_pes_tpc_count(struct nvgpu_gr_config *config,
 	u32 gpc_index, u32 pes_index)
 {
+	nvgpu_assert(gpc_index < nvgpu_gr_config_get_gpc_count(config));
+	nvgpu_assert(pes_index < nvgpu_gr_config_get_pe_count_per_gpc(config));
 	return config->pes_tpc_count[pes_index][gpc_index];
 }
@@ -758,12 +761,14 @@ u32 *nvgpu_gr_config_get_gpc_tpc_mask_base(struct nvgpu_gr_config *config)
 u32 nvgpu_gr_config_get_gpc_tpc_mask(struct nvgpu_gr_config *config,
 	u32 gpc_index)
 {
+	nvgpu_assert(gpc_index < nvgpu_gr_config_get_gpc_count(config));
 	return config->gpc_tpc_mask[gpc_index];
 }
 
 void nvgpu_gr_config_set_gpc_tpc_mask(struct nvgpu_gr_config *config,
 	u32 gpc_index, u32 val)
 {
+	nvgpu_assert(gpc_index < nvgpu_gr_config_get_gpc_count(config));
 	config->gpc_tpc_mask[gpc_index] = val;
 }
@@ -779,6 +784,8 @@ u32 nvgpu_gr_config_get_gpc_skip_mask(struct nvgpu_gr_config *config,
 u32 nvgpu_gr_config_get_pes_tpc_mask(struct nvgpu_gr_config *config,
 	u32 gpc_index, u32 pes_index)
 {
+	nvgpu_assert(gpc_index < nvgpu_gr_config_get_gpc_count(config));
+	nvgpu_assert(pes_index < nvgpu_gr_config_get_pe_count_per_gpc(config));
 	return config->pes_tpc_mask[pes_index][gpc_index];
 }
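A hedged usage sketch for the accessors above: iterate GPC and PES
units strictly below the counts that the new asserts check against.
The traversal itself is illustrative and not part of this change.

/*
 * Illustrative: total TPC count summed over every (GPC, PES) pair,
 * using only indices the new asserts accept.
 */
static u32 total_pes_tpc_count_sketch(struct nvgpu_gr_config *config)
{
	u32 gpc, pes, total = 0U;

	for (gpc = 0U; gpc < nvgpu_gr_config_get_gpc_count(config); gpc++) {
		for (pes = 0U;
		     pes < nvgpu_gr_config_get_pe_count_per_gpc(config);
		     pes++) {
			total += nvgpu_gr_config_get_pes_tpc_count(config,
					gpc, pes);
		}
	}
	return total;
}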

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -315,6 +315,36 @@ struct gops_gr_falcon {
 	int (*ctrl_ctxsw)(struct gk20a *g, u32 fecs_method,
 		u32 fecs_data, u32 *ret_val);
+
+	/**
+	 * @brief Wait for FECS/GPCCS IMEM/DMEM scrubbing to complete,
+	 *        with a timeout of CTXSW_MEM_SCRUBBING_TIMEOUT_MAX_US.
+	 *
+	 * @param g [in] Pointer to GPU driver struct.
+	 *
+	 * Wait for scrubbing of IMEM and DMEM of the FECS and GPCCS
+	 * falcons to complete, with a timeout of
+	 * \a CTXSW_MEM_SCRUBBING_TIMEOUT_MAX_US.
+	 *
+	 * @return 0 in case of success, < 0 in case of failure.
+	 * @retval -ETIMEDOUT if falcon scrubbing timed out.
+	 */
+	int (*wait_mem_scrubbing)(struct gk20a *g);
+
+	/**
+	 * @brief Wait for the CTXSW falcon to become ready.
+	 *
+	 * @param g [in] Pointer to GPU driver struct.
+	 *
+	 * Wait for the CTXSW falcon to become ready by polling for up to
+	 * \a NVGPU_DEFAULT_POLL_TIMEOUT_MS for the correct response codes
+	 * in the falcon mailboxes.
+	 * Configure the CTXSW watchdog timeout with the value
+	 * \a CTXSW_WDT_DEFAULT_VALUE.
+	 *
+	 * @return 0 in case of success, < 0 in case of failure.
+	 * @retval -ETIMEDOUT if communication with the falcon timed out.
+	 */
+	int (*wait_ctxsw_ready)(struct gk20a *g);
 	/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 	void (*handle_fecs_ecc_error)(struct gk20a *g,
 		struct nvgpu_fecs_ecc_status *fecs_ecc_status);
@@ -331,8 +361,6 @@ struct gops_gr_falcon {
 	u32 (*get_gpccs_start_reg_offset)(void);
 	int (*load_ctxsw_ucode)(struct gk20a *g,
 		struct nvgpu_gr_falcon *falcon);
-	int (*wait_mem_scrubbing)(struct gk20a *g);
-	int (*wait_ctxsw_ready)(struct gk20a *g);
 	u32 (*get_current_ctx)(struct gk20a *g);
 	u32 (*get_ctx_ptr)(u32 ctx);
 	u32 (*get_fecs_current_ctx_data)(struct gk20a *g,
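The two ops documented above are typically sequenced during ctxsw
bring-up: scrubbing must complete before the falcon can report ready.
A hedged sketch follows; the wrapper function is illustrative, while
the documented common.gr entry point for this flow is
nvgpu_gr_falcon_init_ctxsw().

/*
 * Illustrative ordering of the two HAL ops documented above.
 */
static int gr_falcon_ready_sketch(struct gk20a *g)
{
	int err = g->ops.gr.falcon.wait_mem_scrubbing(g);

	if (err != 0) {
		return err; /* e.g. -ETIMEDOUT if scrubbing timed out */
	}
	return g->ops.gr.falcon.wait_ctxsw_ready(g);
}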
@@ -644,6 +672,53 @@ struct gops_gr_init {
 		u32 *default_graphics_preempt_mode,
 		u32 *default_compute_preempt_mode);
+
+	/**
+	 * @brief Wait for the graphics engine to idle, with a timeout
+	 *        of NVGPU_DEFAULT_POLL_TIMEOUT_MS.
+	 *
+	 * @param g [in] Pointer to GPU driver struct.
+	 *
+	 * Wait for the graphics engine to idle, with a timeout of
+	 * \a NVGPU_DEFAULT_POLL_TIMEOUT_MS.
+	 * During graphics engine programming it is necessary to ensure
+	 * the engine is idle at various steps.
+	 *
+	 * @return 0 in case of success, < 0 in case of failure.
+	 * @retval -EAGAIN if the graphics engine is busy and cannot idle.
+	 */
+	int (*wait_idle)(struct gk20a *g);
+
+	/**
+	 * @brief Wait for the FE method pipeline to idle, with a timeout
+	 *        of NVGPU_DEFAULT_POLL_TIMEOUT_MS.
+	 *
+	 * @param g [in] Pointer to GPU driver struct.
+	 *
+	 * Wait for the FE unit's method pipeline to idle, with a timeout
+	 * of \a NVGPU_DEFAULT_POLL_TIMEOUT_MS.
+	 * During graphics engine programming it is necessary to ensure
+	 * the FE method pipeline is idle at various steps.
+	 *
+	 * @return 0 in case of success, < 0 in case of failure.
+	 * @retval -EAGAIN if the FE method pipeline is busy and cannot
+	 *         idle.
+	 */
+	int (*wait_fe_idle)(struct gk20a *g);
+
+	/**
+	 * @brief Force the FE power mode to always-on.
+	 *
+	 * @param g [in] Pointer to GPU driver struct.
+	 * @param force_on [in] Boolean flag to enable/disable the mode.
+	 *
+	 * Enable or disable the force power-on mode of the graphics
+	 * engine based on the \a force_on parameter. Wait for up to
+	 * 2000 us to ensure the power mode is set correctly.
+	 *
+	 * @return 0 in case of success, < 0 in case of failure.
+	 * @retval -ETIMEDOUT if the power mode was not updated within
+	 *         the timeout.
+	 */
+	int (*fe_pwr_mode_force_on)(struct gk20a *g, bool force_on);
 	/** @cond DOXYGEN_SHOULD_SKIP_THIS */
 	int (*ecc_scrub_reg)(struct gk20a *g,
 		struct nvgpu_gr_config *gr_config);
@@ -671,9 +746,6 @@ struct gops_gr_init {
 	void (*cwd_gpcs_tpcs_num)(struct gk20a *g,
 		u32 gpc_count, u32 tpc_count);
 	int (*wait_empty)(struct gk20a *g);
-	int (*wait_idle)(struct gk20a *g);
-	int (*wait_fe_idle)(struct gk20a *g);
-	int (*fe_pwr_mode_force_on)(struct gk20a *g, bool force_on);
 	void (*override_context_reset)(struct gk20a *g);
 	int (*preemption_state)(struct gk20a *g);
 	void (*fe_go_idle_timeout)(struct gk20a *g, bool enable);
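For context, common.gr drives these three ops roughly as below when it
needs exclusive, idle access to the engine (e.g. while capturing the
golden context image). This is a hedged sketch; the surrounding
programming steps and error handling are elided.

/*
 * Illustrative: force FE power on, program the engine while ensuring
 * it idles, then release the forced power mode.
 */
static int gr_program_engine_sketch(struct gk20a *g)
{
	int err = g->ops.gr.init.fe_pwr_mode_force_on(g, true);

	if (err != 0) {
		return err;
	}

	/* ... engine programming goes here ... */

	err = g->ops.gr.init.wait_fe_idle(g);
	if (err == 0) {
		err = g->ops.gr.init.wait_idle(g);
	}

	/* Best-effort restore; this sketch ignores the failure path. */
	(void) g->ops.gr.init.fe_pwr_mode_force_on(g, false);
	return err;
}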

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -165,9 +165,11 @@ u32 nvgpu_gr_config_get_sm_count_per_tpc(struct nvgpu_gr_config *config);
  * @brief Get PPC count for given GPC.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param gpc_index [in] Index of GPC.
+ * @param gpc_index [in] Valid GPC index.
  *
  * This function returns number of PPCs for given GPC index.
+ * GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count(), otherwise an assert is raised.
  *
  * @return number of PPCs for given GPC.
  */
@@ -190,11 +192,13 @@ u32 *nvgpu_gr_config_get_gpc_tpc_count_base(struct nvgpu_gr_config *config);
  * @brief Get TPC count for given GPC.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param gpc_index [in] Index of GPC.
+ * @param gpc_index [in] Valid GPC index.
  *
  * This function returns number of TPCs for given GPC index.
+ * GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count(), otherwise value of 0 is returned.
  *
- * @return number of TPCs for given GPC.
+ * @return number of TPCs for given GPC for valid GPC index.
  */
 u32 nvgpu_gr_config_get_gpc_tpc_count(struct nvgpu_gr_config *config,
 	u32 gpc_index);
@@ -203,14 +207,19 @@ u32 nvgpu_gr_config_get_gpc_tpc_count(struct nvgpu_gr_config *config,
  * @brief Get TPC count for given PES/GPC.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param gpc_index [in] Index of GPC.
- * @param pes_index [in] Index of PES.
+ * @param gpc_index [in] Valid GPC index.
+ * @param pes_index [in] Valid PES index.
  *
  * A GPC includes multiple TPC and PES units. A PES unit has multiple
  * TPC units connected to it within same GPC.
  * This function returns number of TPCs attached to PES with index
  * pes_index in a GPC with index gpc_index.
  *
+ * GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count() and PES index must be less than value
+ * returned by #nvgpu_gr_config_get_pe_count_per_gpc(),
+ * otherwise an assert is raised.
+ *
  * @return number of TPCs for given PES/GPC.
  */
 u32 nvgpu_gr_config_get_pes_tpc_count(struct nvgpu_gr_config *config,
@@ -232,11 +241,13 @@ u32 *nvgpu_gr_config_get_gpc_tpc_mask_base(struct nvgpu_gr_config *config);
  * @brief Get TPC mask for given GPC.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param gpc_index [in] Index of GPC.
+ * @param gpc_index [in] Valid GPC index.
  *
  * This function returns mask of TPCs for given GPC index.
  * Each set bit indicates TPC with that index is available, otherwise
  * the TPC is considered floorswept.
+ * GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count(), otherwise an assert is raised.
  *
  * @return mask of TPCs for given GPC.
  */
@@ -247,11 +258,13 @@ u32 nvgpu_gr_config_get_gpc_tpc_mask(struct nvgpu_gr_config *config,
  * @brief Set TPC mask for given GPC.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param gpc_index [in] Index of GPC.
+ * @param gpc_index [in] Valid GPC index.
  * @param val [in] Mask value to be set.
  *
  * This function sets the TPC mask in #nvgpu_gr_config struct
  * for given GPC index.
+ * GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count(), otherwise an assert is raised.
  */
 void nvgpu_gr_config_set_gpc_tpc_mask(struct nvgpu_gr_config *config,
 	u32 gpc_index, u32 val);
@@ -260,11 +273,13 @@ void nvgpu_gr_config_set_gpc_tpc_mask(struct nvgpu_gr_config *config,
  * @brief Get TPC skip mask for given GPC.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param gpc_index [in] Index of GPC.
+ * @param gpc_index [in] Valid GPC index.
  *
  * This function returns skip mask of TPCs for given GPC index.
  * This mask will be used to skip certain TPC during load balancing
  * if any of the PES is overloaded.
+ * GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count(), otherwise value of 0 is returned.
  *
  * @return skip mask of TPCs for given GPC.
  */
@@ -275,14 +290,19 @@ u32 nvgpu_gr_config_get_gpc_skip_mask(struct nvgpu_gr_config *config,
  * @brief Get TPC mask for given PES/GPC.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param gpc_index [in] Index of GPC.
- * @param pes_index [in] Index of PES.
+ * @param gpc_index [in] Valid GPC index.
+ * @param pes_index [in] Valid PES index.
  *
  * A GPC includes multiple TPC and PES units. A PES unit has multiple
  * TPC units connected to it within same GPC.
  * This function returns mask of TPCs attached to PES with index
  * pes_index in a GPC with index gpc_index
  *
+ * GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count() and PES index must be less than value
+ * returned by #nvgpu_gr_config_get_pe_count_per_gpc(),
+ * otherwise an assert is raised.
+ *
  * @return mask of TPCs for given PES/GPC.
  */
 u32 nvgpu_gr_config_get_pes_tpc_mask(struct nvgpu_gr_config *config,
@@ -326,7 +346,7 @@ void nvgpu_gr_config_set_no_of_sm(struct nvgpu_gr_config *config, u32 no_of_sm);
  * @brief Get information of given SM.
  *
  * @param config [in] Pointer to GR configuration struct.
- * @param sm_id [in] SM index.
+ * @param sm_id [in] Valid SM index.
  *
  * common.gr unit stores information of each SM into an array of struct
  * #nvgpu_sm_info. This information includes GPC/TPC indexes for
@@ -335,7 +355,7 @@ void nvgpu_gr_config_set_no_of_sm(struct nvgpu_gr_config *config, u32 no_of_sm);
  * This function will return pointer to #nvgpu_sm_info struct for SM with
  * requested index.
  *
- * @return pointer to struct #nvgpu_sm_info
+ * @return pointer to struct #nvgpu_sm_info corresponding to requested sm_id.
  */
 struct nvgpu_sm_info *nvgpu_gr_config_get_sm_info(struct nvgpu_gr_config *config,
 	u32 sm_id);
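A hedged sketch of staying inside the documented valid range for
sm_id: iterate up to the configured SM count and read each record. The
per-field accessors named here (nvgpu_gr_config_get_sm_info_*) exist
in common.gr but are assumptions as far as this diff goes, as is
nvgpu_gr_config_get_no_of_sm().

/*
 * Illustrative: log the GPC/TPC placement of every SM, using only
 * sm_id values below the configured SM count.
 */
static void log_sm_layout_sketch(struct gk20a *g,
	struct nvgpu_gr_config *config)
{
	u32 sm_id;

	for (sm_id = 0U; sm_id < nvgpu_gr_config_get_no_of_sm(config);
	     sm_id++) {
		struct nvgpu_sm_info *info =
			nvgpu_gr_config_get_sm_info(config, sm_id);

		nvgpu_log_info(g, "sm %u -> gpc %u tpc %u", sm_id,
			nvgpu_gr_config_get_sm_info_gpc_index(info),
			nvgpu_gr_config_get_sm_info_tpc_index(info));
	}
}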

View File

@@ -169,7 +169,8 @@ void nvgpu_gr_ctx_desc_free(struct gk20a *g,
  * @param size [in] Size of buffer to be set.
  *
  * This function sets size of GR context buffer with given buffer
- * index.
+ * index. \a index must be less than NVGPU_GR_CTX_COUNT otherwise
+ * an assert is raised.
  */
 void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
 	u32 index, u32 size);
@@ -256,7 +257,7 @@ void nvgpu_gr_ctx_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
  * #nvgpu_gr_ctx struct.
  *
  * @return 0 in case of success, < 0 in case of failure.
- * @retval -ENOMEM if memory mapping fails.
+ * @retval -ENOMEM if memory mapping fails for any context buffer.
  */
 int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx,
@@ -418,6 +419,9 @@ void nvgpu_gr_ctx_patch_write(struct gk20a *g,
  *
  * This function will set given compute preemption mode into #nvgpu_gr_ctx
  * structure.
+ *
+ * @see nvgpu_gr_ctx_check_valid_preemption_mode for valid compute preemption
+ * modes.
  */
 void nvgpu_gr_ctx_init_compute_preemption_mode(struct nvgpu_gr_ctx *gr_ctx,
 	u32 compute_preempt_mode);
@@ -445,8 +449,10 @@ u32 nvgpu_gr_ctx_get_compute_preemption_mode(struct nvgpu_gr_ctx *gr_ctx);
  * This function checks if requested graphics/compute preemption modes are
  * valid or not.
  *
- * Requesting any graphics preemption mode or requesting CILP compute
- * preemption mode is not allowed in safety build.
+ * The function supports NVGPU_PREEMPTION_MODE_GRAPHICS_WFI graphics
+ * preemption mode and NVGPU_PREEMPTION_MODE_COMPUTE_WFI,
+ * NVGPU_PREEMPTION_MODE_COMPUTE_CTA compute preemption modes as
+ * valid preemption modes. Any other preemption mode is invalid.
  *
  * Requesting both CILP compute and GFxP graphics preemption modes at
  * the same time is not allowed.
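The validity rule documented above reduces to a small predicate. This
is a hedged sketch only: the real nvgpu_gr_ctx_check_valid_preemption_mode()
takes the gr_ctx and both requested modes, and additionally rejects the
CILP + GFxP combination; treating 0 as "mode not requested" is an
assumption here.

/*
 * Illustrative reduction of the documented rule: WFI graphics mode
 * and WFI/CTA compute modes are the only valid requests.
 */
static bool preempt_modes_valid_sketch(u32 graphics_mode, u32 compute_mode)
{
	bool gfx_ok = (graphics_mode == 0U) ||
		(graphics_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_WFI);
	bool comp_ok = (compute_mode == 0U) ||
		(compute_mode == NVGPU_PREEMPTION_MODE_COMPUTE_WFI) ||
		(compute_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA);

	return gfx_ok && comp_ok;
}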

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -121,7 +121,8 @@ void nvgpu_gr_global_ctx_desc_free(struct gk20a *g,
  * @param size [in] Size of buffer to be set.
  *
  * This function sets size of global context buffer with given buffer
- * index.
+ * index. \a index must be less than NVGPU_GR_GLOBAL_CTX_COUNT
+ * otherwise an assert is raised.
  */
 void nvgpu_gr_global_ctx_set_size(struct nvgpu_gr_global_ctx_buffer_desc *desc,
 	u32 index, size_t size);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,7 +43,7 @@
  *
  * common.gr unit has below responsibilities to manage GR engine h/w:
  *
- * - Enable GR engine h/w.
+ * - Reset and enable GR engine h/w.
  * - Allocate all necessary s/w data structures to hold GR engine
  *   configuration.
  * - Configure GR engine h/w to a known good state.
@@ -268,10 +268,11 @@ void nvgpu_gr_remove_support(struct gk20a *g);
  * @brief Get base register offset of a given GPC.
  *
  * @param g [in] Pointer to GPU driver struct.
- * @param gpc [in] GPC index.
+ * @param gpc [in] Valid GPC index.
  *
  * This function calculates and returns base register offset of a given
- * GPC.
+ * GPC. GPC index must be less than value returned by
+ * #nvgpu_gr_config_get_gpc_count(), otherwise an assert is raised.
  *
  * @return base register offset of a given GPC.
  */
@@ -281,10 +282,12 @@ u32 nvgpu_gr_gpc_offset(struct gk20a *g, u32 gpc);
  * @brief Get base register offset of a given TPC in a GPC.
  *
  * @param g [in] Pointer to GPU driver struct.
- * @param tpc [in] TPC index.
+ * @param tpc [in] Valid TPC index.
  *
  * This function calculates and returns base register offset of a given
- * TPC within a GPC.
+ * TPC within a GPC. TPC index must be less than value returned by
+ * #nvgpu_gr_config_get_max_tpc_per_gpc_count(), otherwise an assert
+ * is raised.
  *
  * @return base register offset of a given TPC.
  */
@@ -294,10 +297,12 @@ u32 nvgpu_gr_tpc_offset(struct gk20a *g, u32 tpc);
  * @brief Get base register offset of a given SM in a GPC/TPC.
  *
  * @param g [in] Pointer to GPU driver struct.
- * @param sm [in] SM index.
+ * @param sm [in] Valid SM index.
  *
  * This function calculates and returns base register offset of a given
- * SM within a GPC/TPC pair.
+ * SM within a GPC/TPC pair. SM index must be less than value returned by
+ * #nvgpu_gr_config_get_sm_count_per_tpc(), otherwise an assert
+ * is raised.
  *
  * @return base register offset of a given SM.
  */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -39,6 +39,7 @@ struct nvgpu_ecc_stat;
  * tpcs, which is then added to the stats_list in struct nvgpu_ecc.
  *
  * @return 0 in case of success, less than 0 for failure.
+ * @retval -ENOMEM if memory allocation fails for any counter.
  */
 int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
 	struct nvgpu_ecc_stat ***stat, const char *name);
@@ -71,6 +72,7 @@ int nvgpu_ecc_counter_init_per_tpc(struct gk20a *g,
  * then added to the stats_list in struct nvgpu_ecc.
  *
  * @return 0 in case of success, less than 0 for failure.
+ * @retval -ENOMEM if memory allocation fails for any counter.
  */
 int nvgpu_ecc_counter_init_per_gpc(struct gk20a *g,
 	struct nvgpu_ecc_stat **stat, const char *name);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -209,6 +209,7 @@ void nvgpu_gr_falcon_remove_support(struct gk20a *g,
  * #nvgpu_gr_falcon_load_secure_ctxsw_ucode() to boot ucodes.
  *
  * @return 0 in case of success, < 0 in case of failure.
+ * @retval -ETIMEDOUT if communication with falcon timed out.
  */
 int nvgpu_gr_falcon_init_ctxsw(struct gk20a *g, struct nvgpu_gr_falcon *falcon);
@@ -222,6 +223,7 @@ int nvgpu_gr_falcon_init_ctxsw(struct gk20a *g, struct nvgpu_gr_falcon *falcon);
  * microcontroller.
  *
  * @return 0 in case of success, < 0 in case of failure.
+ * @retval -ETIMEDOUT if communication with falcon timed out.
  */
 int nvgpu_gr_falcon_init_ctx_state(struct gk20a *g,
 	struct nvgpu_gr_falcon *falcon);
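A hedged sketch of consuming the newly documented -ETIMEDOUT path from
these two entry points during GR bring-up; the wrapper and the log
message are illustrative, while nvgpu_err() is the standard nvgpu
logging macro.

/*
 * Illustrative: run ctxsw init and ctx-state query back to back,
 * reporting the documented timeout case distinctly.
 */
static int gr_ctxsw_bringup_sketch(struct gk20a *g,
	struct nvgpu_gr_falcon *falcon)
{
	int err = nvgpu_gr_falcon_init_ctxsw(g, falcon);

	if (err == -ETIMEDOUT) {
		nvgpu_err(g, "falcon communication timed out");
		return err;
	}
	if (err != 0) {
		return err;
	}
	return nvgpu_gr_falcon_init_ctx_state(g, falcon);
}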

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -217,6 +217,14 @@ int nvgpu_gr_intr_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
  * quiesce state in case of uncorrected errors.
  *
  * @return 0 in case of success, < 0 in case of failure.
+ *
+ * @see nvgpu_gr_intr_handle_fecs_error
+ * @see nvgpu_gr_intr_handle_gpc_exception
+ * @see nvgpu_gr_intr_handle_notify_pending
+ * @see nvgpu_gr_intr_handle_semaphore_pending
+ * @see nvgpu_gr_intr_handle_sm_exception
+ * @see nvgpu_gr_intr_report_exception
+ * @see nvgpu_gr_intr_set_error_notifier
  */
 int nvgpu_gr_intr_stall_isr(struct gk20a *g);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -106,6 +106,8 @@ void nvgpu_gr_obj_ctx_commit_inst(struct gk20a *g, struct nvgpu_mem *inst_block,
  * @return 0 in case of success, < 0 in case of failure.
  * @retval -EINVAL if invalid class is provided.
  * @retval -EINVAL if invalid preemption modes are provided.
+ *
+ * @see nvgpu_gr_setup_set_preemption_mode.
  */
 int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_gr_config *config, struct nvgpu_gr_ctx_desc *gr_ctx_desc,
@@ -126,6 +128,8 @@ int nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode(struct gk20a *g,
  * Preemption modes should be already initialized in #nvgpu_gr_ctx
  * struct by calling #nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode()
  * before calling this function.
+ *
+ * @see nvgpu_gr_setup_set_preemption_mode.
  */
 void nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode(struct gk20a *g,
 	struct nvgpu_gr_config *config,
@@ -181,6 +185,8 @@ void nvgpu_gr_obj_ctx_commit_global_ctx_buffers(struct gk20a *g,
  * @retval -ENOMEM if local golden context memory allocation fails.
  * @retval -ENOMEM if contents from two golden image captures do not
  *         match.
+ *
+ * @see nvgpu_gr_setup_alloc_obj_ctx.
  */
 int nvgpu_gr_obj_ctx_alloc_golden_ctx_image(struct gk20a *g,
 	struct nvgpu_gr_obj_ctx_golden_image *golden_image,
@@ -226,7 +232,11 @@ int nvgpu_gr_obj_ctx_alloc_golden_ctx_image(struct gk20a *g,
  *
  * @return 0 in case of success, < 0 in case of failure.
  * @retval -ENOMEM if memory allocation fails during any step.
+ * @retval -ENOMEM if contents from two golden image captures do not match.
  * @retval -ETIMEDOUT if GR engine idle times out.
+ * @retval -EINVAL if invalid GPU class ID is provided.
+ *
+ * @see nvgpu_gr_setup_alloc_obj_ctx.
  */
 int nvgpu_gr_obj_ctx_alloc(struct gk20a *g,
 	struct nvgpu_gr_obj_ctx_golden_image *golden_image,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -61,6 +61,7 @@ struct nvgpu_gr_ctx;
  * - Allocating patch context image.
  * - Creating Golden context image upon first request to allocate
  *   object context.
+ * - Capturing golden context image twice and ensuring contents match.
  * - Initializing context preemption mode.
  * - Initializing various other fields in context image.
  * - Mapping global context buffers into context image.
@@ -70,16 +71,15 @@ struct nvgpu_gr_ctx;
  * function will not do anything but just return success. All graphics classes
  * are treated as invalid classes in safety.
  *
- * @see gops_class for list of valid classes.
- *
- * Only below classes are supported.
- * 1. VOLTA_COMPUTE_A
- * 2. VOLTA_DMA_COPY_A
- * 3. VOLTA_CHANNEL_GPFIFO_A
- *
  * @return 0 in case of success, < 0 in case of failure.
  * @retval -ENOMEM if memory allocation fails for any context image.
+ * @retval -ENOMEM if contents from two golden image captures do not match.
+ * @retval -ETIMEDOUT if GR engine idle times out.
  * @retval -EINVAL if invalid GPU class ID is provided.
+ *
+ * @see gops_class for list of valid classes.
+ * @see nvgpu_gr_obj_ctx_alloc.
+ * @see nvgpu_gr_obj_ctx_alloc_golden_ctx_image.
  */
 int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
 	u32 flags);
@@ -132,6 +132,10 @@ void nvgpu_gr_setup_free_subctx(struct nvgpu_channel *c);
  * @return 0 in case of success, < 0 in case of failure.
  * @retval -EINVAL if invalid preemption modes are provided.
  * @retval -EINVAL if invalid GPU channel pointer is provided.
+ *
+ * @see nvgpu_gr_ctx_check_valid_preemption_mode.
+ * @see nvgpu_gr_obj_ctx_set_ctxsw_preemption_mode.
+ * @see nvgpu_gr_obj_ctx_update_ctxsw_preemption_mode.
  */
 int nvgpu_gr_setup_set_preemption_mode(struct nvgpu_channel *ch,
 	u32 graphics_preempt_mode, u32 compute_preempt_mode,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -44,7 +44,9 @@ struct gk20a;
  * Input: #test_fifo_init_support() run for this GPU
  *
  * Targets: gm20b_gr_falcon_wait_mem_scrubbing,
+ * gops_gr_falcon.wait_mem_scrubbing,
  * gm20b_gr_falcon_wait_ctxsw_ready,
+ * gops_gr_falcon.wait_ctxsw_ready,
  * gm20b_gr_falcon_init_ctx_state,
  * gm20b_gr_falcon_submit_fecs_method_op,
  * nvgpu_gr_get_falcon_ptr,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -596,6 +596,7 @@ static int test_gr_init_hal_pd_skip_table_gpc(struct gk20a *g)
 	 * value is reflected in each loop
 	 */
 	for (i = 0; i < gr_pd_dist_skip_table__size_1_v(); i++) {
+		if (i < nvgpu_gr_config_get_gpc_count(config)) {
 		config->gpc_skip_mask[i] = 0x1;
 		g->ops.gr.init.pd_skip_table_gpc(g, config);
@@ -605,6 +606,7 @@ static int test_gr_init_hal_pd_skip_table_gpc(struct gk20a *g)
 		config->gpc_skip_mask[i] = 0x0;
 		}
+	}
 	/* All skip_masks are unset in above loop already */
 	g->ops.gr.init.pd_skip_table_gpc(g, config);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -222,7 +222,7 @@ int test_gr_init_hal_ecc_scrub_reg(struct unit_module *m,
  * size of alpha_cb and attrib_cb. Then call g->ops.gr.init.get_attrib_cb_size
  * and g->ops.gr.init.get_alpha_cb_size and verify if expected size is
  * returned in response.
- * - Set gpc_skip_masks for all the GPCs and call g->ops.gr.init.pd_skip_table_gpc.
+ * - Set gpc_skip_masks for available GPCs and call g->ops.gr.init.pd_skip_table_gpc.
  *   Ensure that skip mask is reflected in h/w register.
  *   Unset all the gpc_skip_masks and ensure skip mask is unset in h/w register.
  *   Skip mask should be zero in h/w register only if all the skip masks are zero.