gpu: nvgpu: switch nvgpu locks to vanilla Linux locks

Replace the nvgpu locks with vanilla Linux locks. When a custom kernel
driver includes nv-p2p.h, nvgpu/linux/lock.h is not available to it,
because nvgpu/linux/lock.h is not copied to
/usr/src/kernel_header_file.
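
For reference, the conversion is a one-for-one substitution of the nvgpu
wrappers with the stock <linux/mutex.h> API. A minimal sketch of the
mapping (the struct and function names here are illustrative, not from
the driver):

    #include <linux/mutex.h>

    /* Hypothetical container, standing in for nvidia_p2p_page_table. */
    struct example {
            struct mutex lock;      /* was: struct nvgpu_mutex lock; */
    };

    static void example_locking(struct example *e)
    {
            mutex_init(&e->lock);   /* was: nvgpu_mutex_init(&e->lock); */
            mutex_lock(&e->lock);   /* was: nvgpu_mutex_acquire(&e->lock); */
            /* ... critical section ... */
            mutex_unlock(&e->lock); /* was: nvgpu_mutex_release(&e->lock); */
    }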

Bug 200438879

Change-Id: I55b52c6f791970650388b7d51c4d30b5fe75bbb8
Signed-off-by: Preetham Chandru Ramchandra <pchandru@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1997950
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Preetham Chandru Ramchandra
Date:      2019-01-17 22:37:39 +05:30
Committer: mobile promotions
Commit:    eb887094e4 (parent a8d5a4d405)

2 changed files with 13 additions and 13 deletions

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -101,7 +101,7 @@ int nvidia_p2p_get_pages(u64 vaddr, u64 size,
         (*page_table)->free_callback = free_callback;
         (*page_table)->data = data;
         (*page_table)->vaddr = vaddr;
-        nvgpu_mutex_init(&(*page_table)->lock);
+        mutex_init(&(*page_table)->lock);
         (*page_table)->mapped = NVIDIA_P2P_PINNED;

         ret = mmu_notifier_register(&(*page_table)->mn, (*page_table)->mm);
@@ -143,7 +143,7 @@ int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
                 return 0;
         }

-        nvgpu_mutex_acquire(&page_table->lock);
+        mutex_lock(&page_table->lock);

         if (page_table->mapped & NVIDIA_P2P_MAPPED) {
                 WARN(1, "Attempting to free unmapped pages");
@@ -161,7 +161,7 @@ int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
                 page_table->mapped &= (u32)~NVIDIA_P2P_PINNED;
         }

-        nvgpu_mutex_release(&page_table->lock);
+        mutex_unlock(&page_table->lock);

         return 0;
 }
@@ -183,18 +183,18 @@ int nvidia_p2p_map_pages(struct device *dev,
                 return -EINVAL;
         }

-        nvgpu_mutex_acquire(&page_table->lock);
+        mutex_lock(&page_table->lock);

         pages = page_table->pages;
         nr_pages = page_table->entries;
         if (nr_pages <= 0) {
-                nvgpu_mutex_release(&page_table->lock);
+                mutex_unlock(&page_table->lock);
                 return -EINVAL;
         }

         *dma_mapping = kzalloc(sizeof(**dma_mapping), GFP_KERNEL);
         if (!*dma_mapping) {
-                nvgpu_mutex_release(&page_table->lock);
+                mutex_unlock(&page_table->lock);
                 return -ENOMEM;
         }
         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
@@ -235,7 +235,7 @@ int nvidia_p2p_map_pages(struct device *dev,
                 (*dma_mapping)->hw_len[i] = sg_dma_len(sg);
         }
         (*dma_mapping)->page_table->mapped |= NVIDIA_P2P_MAPPED;
-        nvgpu_mutex_release(&page_table->lock);
+        mutex_unlock(&page_table->lock);

         return 0;
 free_hw_address:
@@ -250,7 +250,7 @@ free_sgt:
 free_dma_mapping:
         kfree(*dma_mapping);
         *dma_mapping = NULL;
-        nvgpu_mutex_release(&page_table->lock);
+        mutex_unlock(&page_table->lock);

         return ret;
 }
@@ -269,7 +269,7 @@ int nvidia_p2p_unmap_pages(struct nvidia_p2p_dma_mapping *dma_mapping)
                 return -EFAULT;
         }

-        nvgpu_mutex_acquire(&page_table->lock);
+        mutex_lock(&page_table->lock);
         if (page_table->mapped & NVIDIA_P2P_MAPPED) {
                 kfree(dma_mapping->hw_len);
                 kfree(dma_mapping->hw_address);
@@ -283,7 +283,7 @@ int nvidia_p2p_unmap_pages(struct nvidia_p2p_dma_mapping *dma_mapping)
                 kfree(dma_mapping);
                 page_table->mapped &= (u32)~NVIDIA_P2P_MAPPED;
         }
-        nvgpu_mutex_release(&page_table->lock);
+        mutex_unlock(&page_table->lock);

         return 0;
 }
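
The converted map/unmap paths keep the usual kernel discipline of
releasing the mutex on every exit, including the error unwind through
the free_* labels. A simplified, hypothetical sketch of that shape
(the demo_* names are not from the driver):

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct demo_table {
            struct mutex lock;      /* guards 'mapped' and 'mapping' */
            u32 mapped;
            void *mapping;
    };

    /* Mirrors the lock/unwind shape of nvidia_p2p_map_pages, much simplified. */
    static int demo_map(struct demo_table *t)
    {
            int ret = 0;

            mutex_lock(&t->lock);
            t->mapping = kzalloc(64, GFP_KERNEL);
            if (!t->mapping) {
                    ret = -ENOMEM;
                    goto out_unlock;        /* every error path still unlocks */
            }
            t->mapped |= 1;                 /* e.g. NVIDIA_P2P_MAPPED */
    out_unlock:
            mutex_unlock(&t->lock);
            return ret;
    }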

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -64,7 +64,7 @@ struct nvidia_p2p_page_table {
         struct mm_struct *mm;
         struct mmu_notifier mn;
-        struct nvgpu_mutex lock;
+        struct mutex lock;
         void (*free_callback)(void *data);
         void *data;
 };
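
On the header side, struct mutex comes from <linux/mutex.h>, which is
part of the exported kernel headers, so an out-of-tree module can now
include the p2p header without reaching into nvgpu internals. A minimal
consumer sketch (the "nv-p2p.h" include path is an assumption and may
differ per platform):

    #include <linux/module.h>
    #include <linux/mutex.h>        /* provides struct mutex for the page table lock */
    #include "nv-p2p.h"             /* no longer pulls in nvgpu/linux/lock.h */

    MODULE_LICENSE("GPL");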