gpu: nvgpu: add platform atomic support

Add a new flag to nvgpu_as_map_buffer_ex_args so that an
application can request platform atomic support for a page.
When the platform atomic attribute flag is set, the PTE
memory aperture is set to the coherent type.

Rename the nvgpu_aperture_mask_coh() function to
nvgpu_aperture_mask_raw().

bug 200580236

Change-Id: I18266724dafdc8dfd96a0711f23cf08e23682afc
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2012679
(cherry picked from commit 9e0a9004b7)
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2274914
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: Sreeniketh H <sh@nvidia.com>
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Vinod G
Date:         2019-02-05 18:05:42 -08:00
Committed by: mobile promotions
Parent:       264691e69d
Commit:       dacb06f464

12 changed files with 67 additions and 23 deletions
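
In summary, the new attribute flows through the patch as follows:
NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC (uapi) is translated into
NVGPU_VM_MAP_PLATFORM_ATOMIC (core VM flag), which sets
attrs->platform_atomic in the GMMU attributes; the new-format PTE path
then upgrades the PTE aperture to sysmem_coherent whenever the chip
enables NVGPU_SUPPORT_PLATFORM_ATOMIC (set for GV11B and vGPU GV11B
here).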


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -628,7 +628,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 		"vm=%s "
 		"%-5s GPU virt %#-12llx +%#-9llx phys %#-12llx "
 		"phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
-		"kind=%#02x APT=%-6s %c%c%c%c%c",
+		"kind=%#02x APT=%-6s %c%c%c%c%c%c",
 		vm->name,
 		(sgt != NULL) ? "MAP" : "UNMAP",
 		virt_addr,
@@ -643,7 +643,8 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 		attrs->sparse ? 'S' : '-',
 		attrs->priv ? 'P' : '-',
 		attrs->coherent ? 'I' : '-',
-		attrs->valid ? 'V' : '-');
+		attrs->valid ? 'V' : '-',
+		attrs->platform_atomic ? 'A' : '-');

 	err = __nvgpu_gmmu_do_update_page_table(vm,
 					sgt,
@@ -702,7 +703,8 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		.priv = priv,
 		.coherent = flags & NVGPU_VM_MAP_IO_COHERENT,
 		.valid = (flags & NVGPU_VM_MAP_UNMAPPED_PTE) == 0U,
-		.aperture = aperture
+		.aperture = aperture,
+		.platform_atomic = (flags & NVGPU_VM_MAP_PLATFORM_ATOMIC) != 0U
 	};

 	/*


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@
  * will not add any checks. If you want to simply use the default coherency then
  * use nvgpu_aperture_mask().
  */
-u32 nvgpu_aperture_mask_coh(struct gk20a *g, enum nvgpu_aperture aperture,
+u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 		u32 sysmem_mask, u32 sysmem_coh_mask,
 		u32 vidmem_mask)
 {
@@ -71,7 +71,7 @@ u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
 		ap = APERTURE_SYSMEM_COH;
 	}

-	return nvgpu_aperture_mask_coh(g, ap,
+	return nvgpu_aperture_mask_raw(g, ap,
 			sysmem_mask,
 			sysmem_coh_mask,
 			vidmem_mask);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -222,7 +222,7 @@ static void __update_pte(struct vm_gk20a *vm,
 		pte_w[0] |= gmmu_pte_privilege_true_f();
 	}

-	pte_w[1] = nvgpu_aperture_mask_coh(g, attrs->aperture,
+	pte_w[1] = nvgpu_aperture_mask_raw(g, attrs->aperture,
 			gmmu_pte_aperture_sys_mem_ncoh_f(),
 			gmmu_pte_aperture_sys_mem_coh_f(),
 			gmmu_pte_aperture_video_memory_f()) |


@@ -1,7 +1,7 @@
 /*
  * GP10B MMU
  *
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -78,6 +78,32 @@ clean_up_va:
 	return err;
 }

+/*
+ * For GV11B and TU104, the MSS NVLINK HW settings are in force_snoop mode,
+ * which forces all GPU mappings to be coherent. By default the mem aperture
+ * is set to sysmem_non_coherent and uses L2 mode. Change the target PTE
+ * aperture to sysmem_coherent if the mem attribute requests platform
+ * atomics, so that the RMW atomic capability is used.
+ *
+ */
+static u32 gmmu_aperture_mask(struct gk20a *g,
+			enum nvgpu_aperture mem_ap,
+			bool platform_atomic_attr,
+			u32 sysmem_mask,
+			u32 sysmem_coh_mask,
+			u32 vidmem_mask)
+{
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC) &&
+			platform_atomic_attr) {
+		mem_ap = APERTURE_SYSMEM_COH;
+	}
+
+	return nvgpu_aperture_mask_raw(g, mem_ap,
+			sysmem_mask,
+			sysmem_coh_mask,
+			vidmem_mask);
+}
+
 static void update_gmmu_pde3_locked(struct vm_gk20a *vm,
 				const struct gk20a_mmu_level *l,
 				struct nvgpu_gmmu_pd *pd,
@@ -191,8 +217,9 @@ static void __update_pte(struct vm_gk20a *vm,
 	u32 pte_addr = attrs->aperture == APERTURE_SYSMEM ?
 		gmmu_new_pte_address_sys_f(phys_shifted) :
 		gmmu_new_pte_address_vid_f(phys_shifted);
-	u32 pte_tgt = nvgpu_aperture_mask_coh(g,
+	u32 pte_tgt = gmmu_aperture_mask(g,
 			attrs->aperture,
+			attrs->platform_atomic,
 			gmmu_new_pte_aperture_sys_mem_ncoh_f(),
 			gmmu_new_pte_aperture_sys_mem_coh_f(),
 			gmmu_new_pte_aperture_video_memory_f());
@@ -253,7 +280,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 		"vm=%s "
 		"PTE: i=%-4u size=%-2u | "
 		"GPU %#-12llx phys %#-12llx "
-		"pgsz: %3dkb perm=%-2s kind=%#02x APT=%-6s %c%c%c%c%c "
+		"pgsz: %3dkb perm=%-2s kind=%#02x APT=%-6s %c%c%c%c%c%c "
 		"ctag=0x%08x "
 		"[0x%08x, 0x%08x]",
 		vm->name,
@@ -268,6 +295,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 		attrs->priv ? 'P' : '-',
 		attrs->coherent ? 'I' : '-',
 		attrs->valid ? 'V' : '-',
+		attrs->platform_atomic ? 'A' : '-',
 		(u32)attrs->ctag / g->ops.fb.compression_page_size(g),
 		pte_w[1], pte_w[0]);
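
(Design note: after the rename, the common helper is deliberately
policy-free; nvgpu_aperture_mask_raw() only translates an aperture enum
into the caller-supplied mask, while the chip-level gmmu_aperture_mask()
above layers the platform-atomic upgrade to sysmem_coherent on top.)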


@@ -1,7 +1,7 @@
 /*
  * GV11B Tegra HAL interface
  *
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -957,6 +957,7 @@ int gv11b_init_hal(struct gk20a *g)
 	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_FEATURE_CONTROL, true);
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, false);
+	__nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC, true);

 	g->name = "gv11b";


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -178,10 +178,13 @@ struct gk20a;
 /* NVGPU_GPU_IOCTL_GET_GPU_LOAD is available */
 #define NVGPU_SUPPORT_GET_GPU_LOAD		70

+/* PLATFORM_ATOMIC support */
+#define NVGPU_SUPPORT_PLATFORM_ATOMIC		71
+
 /*
  * Must be greater than the largest bit offset in the above list.
  */
-#define NVGPU_MAX_ENABLED_BITS			71
+#define NVGPU_MAX_ENABLED_BITS			72

 /**
  * nvgpu_is_enabled - Check if the passed flag is enabled.


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -155,6 +155,7 @@ struct nvgpu_gmmu_pd {
  *   valid:     Set if the PTE should be marked valid.
  *   aperture:  VIDMEM or SYSMEM.
  *   debug:     When set print debugging info.
+ *   platform_atomic: True if platform_atomic flag is valid.
  *
  * These fields are dynamically updated as necessary during the map:
  *
@@ -173,8 +174,8 @@ struct nvgpu_gmmu_attrs {
 	bool valid;
 	enum nvgpu_aperture aperture;
 	bool debug;
 	bool l3_alloc;
+	bool platform_atomic;
 };

 struct gk20a_mmu_level {


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -349,7 +349,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem);
 u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem);

-u32 nvgpu_aperture_mask_coh(struct gk20a *g, enum nvgpu_aperture aperture,
+u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 		u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask);
 u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -213,6 +213,7 @@ struct vm_gk20a {
 #define NVGPU_VM_MAP_UNMAPPED_PTE		BIT32(3)
 #define NVGPU_VM_MAP_DIRECT_KIND_CTRL		BIT32(4)
 #define NVGPU_VM_MAP_L3_ALLOC			BIT32(5)
+#define NVGPU_VM_MAP_PLATFORM_ATOMIC		BIT32(6)

 #define NVGPU_KIND_INVALID -1


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -54,6 +54,8 @@ static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
 		core_flags |= NVGPU_VM_MAP_L3_ALLOC;
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)
 		core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC)
+		core_flags |= NVGPU_VM_MAP_PLATFORM_ATOMIC;
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
 		nvgpu_warn(g, "Ignoring deprecated flag: "


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -43,6 +43,7 @@ int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g)
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_SCG, true);
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNCPOINT_ADDRESS, true);
 	__nvgpu_set_enabled(g, NVGPU_SUPPORT_USER_SYNCPOINT, true);
+	__nvgpu_set_enabled(g, NVGPU_SUPPORT_PLATFORM_ATOMIC, true);

 	return 0;
 }


@@ -1,7 +1,7 @@
 /*
  * NVGPU Public Interface Header
  *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1894,6 +1894,7 @@ struct nvgpu_as_bind_channel_args {
 #define NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS	(1 << 6)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC		(1 << 7)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL	(1 << 8)
+#define NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC	(1 << 9)

 /*
  * VM map buffer IOCTL
@@ -1940,6 +1941,10 @@ struct nvgpu_as_bind_channel_args {
  *     Set when userspace plans to pass in @compr_kind and @incompr_kind
  *     instead of letting the kernel work out kind fields.
  *
+ * %NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC
+ *
+ *     Specify that a mapping should use platform atomics.
+ *
  * @kind [IN]
  *
  *     Specify the kind to use for the mapping.
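
For illustration, a userspace request that opts a mapping into platform
atomics could look roughly like the sketch below. This is not part of the
change: the ioctl name, struct fields, and the zero-initialized defaults
are assumptions based on the nvgpu uapi header of this era, and the
buffer/address-space setup is omitted.

/*
 * Minimal sketch (assumed uapi): map a dmabuf into the GPU address space
 * with platform atomics requested.  "as_fd" is an open, channel-bound
 * address space fd; "dmabuf_fd" is the buffer to map.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int map_with_platform_atomics(int as_fd, int dmabuf_fd)
{
	struct nvgpu_as_map_buffer_ex_args args;

	memset(&args, 0, sizeof(args));
	args.dmabuf_fd = dmabuf_fd;
	/* Zeroed size/offset fields: map the whole buffer at a kernel-chosen VA. */
	args.flags = NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC;

	if (ioctl(as_fd, NVGPU_AS_IOCTL_MAP_BUFFER_EX, &args) != 0) {
		perror("NVGPU_AS_IOCTL_MAP_BUFFER_EX");
		return -1;
	}

	/* On success, args.offset holds the GPU VA of the new mapping. */
	printf("mapped at GPU VA 0x%llx\n", (unsigned long long)args.offset);
	return 0;
}

Note that the flag is honored only on chips that enable
NVGPU_SUPPORT_PLATFORM_ATOMIC (GV11B and vGPU GV11B in this change);
elsewhere the PTE keeps the default non-coherent sysmem aperture.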