pycodegen / pystencils_autodiff / Commits

Commit 72efef52, authored 5 years ago by Stephan Seitz
Parent: 4a04ef24

Fix gpuarray_to_tensor
Showing 2 changed files with 39 additions and 31 deletions:

    src/pystencils_autodiff/backends/_pytorch.py    +24 −20
    tests/lbm/backends/_pytorch.py                  +15 −11
src/pystencils_autodiff/backends/_pytorch.py (+24 −20)
@@ -7,8 +7,9 @@ try:
     import pycuda.autoinit
     import pycuda.gpuarray
     import pycuda.driver
+    HAS_PYCUDA = True
 except Exception:
-    pass
+    HAS_PYCUDA = False


 def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_loop, backward_loop,
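This hunk replaces the bare `pass` with a `HAS_PYCUDA` flag, so the module still imports on machines without pycuda and later code can branch on availability instead of failing with a `NameError`. A minimal sketch of how such a flag is typically consumed downstream (the `require_pycuda` helper is hypothetical, not part of this commit):

    try:
        import pycuda.autoinit  # noqa: F401 -- importing creates a CUDA context
        import pycuda.gpuarray
        HAS_PYCUDA = True
    except Exception:
        HAS_PYCUDA = False


    def require_pycuda():
        # Hypothetical guard: raise a clear error up front instead of a
        # NameError on the missing `pycuda` module deep inside a call.
        if not HAS_PYCUDA:
            raise RuntimeError('pycuda is required for GPU interop '
                               'but could not be imported')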
@@ -94,12 +95,12 @@ def numpy_dtype_to_torch(dtype):
 # Fails if different context/thread
-def tensor_to_gpuarray(tensor):
-    if not tensor.is_cuda:
-        raise ValueError(
-            'Cannot convert CPU tensor to GPUArray (call `cuda()` on it)')
-    else:
-        return pycuda.gpuarray.GPUArray(tensor.shape, dtype=torch_dtype_to_numpy(tensor.dtype), gpudata=tensor.data_ptr())
+# def tensor_to_gpuarray(tensor):
+#     if not tensor.is_cuda:
+#         raise ValueError(
+#             'Cannot convert CPU tensor to GPUArray (call `cuda()` on it)')
+#     else:
+#         return pycuda.gpuarray.GPUArray(tensor.shape, dtype=torch_dtype_to_numpy(tensor.dtype), gpudata=tensor.data_ptr())


 def gpuarray_to_tensor(gpuarray, context=None):
@@ -125,20 +126,23 @@ def gpuarray_to_tensor(gpuarray, context=None):
     return out


-class GpuPointerHolder(pycuda.driver.PointerHolderBase):
+if HAS_PYCUDA:
+    class GpuPointerHolder(pycuda.driver.PointerHolderBase):

-    def __init__(self, tensor):
-        super().__init__()
-        self.tensor = tensor
-        self.gpudata = tensor.data_ptr()
+        def __init__(self, tensor):
+            super().__init__()
+            self.tensor = tensor
+            self.gpudata = tensor.data_ptr()

-    def get_pointer(self):
-        return self.tensor.data_ptr()
+        def get_pointer(self):
+            return self.tensor.data_ptr()

-    def __int__(self):
-        return self.__index__()
+        def __int__(self):
+            return self.__index__()

-    # without an __index__ method, arithmetic calls to the GPUArray backed by this pointer fail
-    # not sure why, this needs to return some integer, apparently
-    def __index__(self):
-        return self.gpudata
+        # without an __index__ method, arithmetic calls to the GPUArray backed by this pointer fail
+        # not sure why, this needs to return some integer, apparently
+        def __index__(self):
+            return self.gpudata
+else:
+    GpuPointerHolder = None
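Guarding the class definition is needed because `pycuda.driver.PointerHolderBase` itself does not exist when the import failed; without pycuda, `GpuPointerHolder` is simply `None`. The point of the class is to let a `pycuda.gpuarray.GPUArray` alias a torch tensor's device memory without a copy. A hypothetical usage sketch, assuming pycuda, torch, and a CUDA device are all available:

    import numpy as np
    import torch
    import pycuda.autoinit  # noqa: F401
    import pycuda.gpuarray

    t = torch.ones((4, 4), dtype=torch.float32, device='cuda')
    holder = GpuPointerHolder(t)  # keeps `t` alive, exposes its device pointer
    arr = pycuda.gpuarray.GPUArray(t.shape, dtype=np.float32, gpudata=holder)
    arr += 1  # arithmetic works because __index__ yields the raw pointer
    torch.cuda.synchronize()
    print(t)  # the tensor observes the in-place update: all values are 2.0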
tests/lbm/backends/_pytorch.py (+15 −11)
@@ -11,6 +11,18 @@ try:
 except Exception:
     HAS_PYCUDA = False


+# Fails if different context/thread
+def tensor_to_gpuarray(tensor):
+    if not tensor.is_cuda:
+        raise ValueError(
+            'Cannot convert CPU tensor to GPUArray (call `cuda()` on it)')
+    else:
+        return pycuda.gpuarray.GPUArray(tensor.shape, dtype=torch_dtype_to_numpy(tensor.dtype), gpudata=tensor.data_ptr())
+
+
 def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_loop, backward_loop,
                              convert_tensors_to_arrays=True):
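The test backend now carries its own, uncommented copy of `tensor_to_gpuarray`. Note the conversion is zero-copy: the `GPUArray` is constructed directly on `tensor.data_ptr()`, so both objects share the same device memory, and, as the comment warns, this only works inside the CUDA context/thread that owns the tensor. An illustrative round-trip under that assumption:

    import torch

    t = torch.arange(6, dtype=torch.float32, device='cuda').reshape(2, 3)
    g = tensor_to_gpuarray(t)  # no copy: `g` aliases the tensor's device memory
    g.fill(0.0)                # mutates the shared storage
    torch.cuda.synchronize()
    assert torch.all(t == 0)   # the torch tensor sees the change

    # A CPU tensor is rejected with a ValueError, as coded above:
    # tensor_to_gpuarray(torch.zeros(3))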
@@ -92,17 +104,9 @@ def numpy_dtype_to_torch(dtype):
     return getattr(torch, dtype_name)


-# Fails if different context/thread
-# def tensor_to_gpuarray(tensor):
-#     if not tensor.is_cuda:
-#         raise ValueError(
-#             'Cannot convert CPU tensor to GPUArray (call `cuda()` on it)')
-#     else:
-#         return pycuda.gpuarray.GPUArray(tensor.shape, dtype=torch_dtype_to_numpy(tensor.dtype), gpudata=tensor.data_ptr())
-
 def gpuarray_to_tensor(gpuarray, context=None):
-    '''
+    """
     Convert a :class:`pycuda.gpuarray.GPUArray` to a :class:`torch.Tensor`. The underlying
     storage will NOT be shared, since a new copy must be allocated.
     Parameters
     ----------
@@ -110,7 +114,7 @@ def gpuarray_to_tensor(gpuarray, context=None):
     Returns
     -------
     torch.Tensor
-    '''
+    """
     if not context:
         context = pycuda.autoinit.context
     shape = gpuarray.shape
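In contrast to `tensor_to_gpuarray`, the docstring of `gpuarray_to_tensor` is explicit that a fresh tensor is allocated and the data copied, so the two storages stay independent. A short usage sketch, assuming `pycuda.autoinit` has set up the default context:

    import numpy as np
    import pycuda.autoinit  # noqa: F401
    import pycuda.gpuarray

    g = pycuda.gpuarray.to_gpu(np.arange(6, dtype=np.float32).reshape(2, 3))
    t = gpuarray_to_tensor(g)  # new torch tensor; data is copied, not aliased
    g.fill(0.0)                # does NOT affect `t`
    print(t)                   # still contains 0.0 .. 5.0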