#pragma once

#include <ostream>
#include <type_traits>
#include <drjit/array.h>

namespace dr = drjit;

NAMESPACE_BEGIN(mitsuba)
/**
 * \brief Convenience wrapper to simultaneously instantiate a host and a device
 * version of a type
 *
 * This class implements a simple wrapper that replicates instance attributes
 * on the host and device. This is only relevant when \c DeviceType is a
 * JIT-compiled Dr.Jit array (i.e., when compiling the renderer in CUDA/LLVM
 * mode).
 *
 * Why is this needed? Mitsuba plugins represent their internal state using
 * attributes like position, intensity, etc., which are typically stored as
 * Dr.Jit arrays. For technical reasons, it is helpful if those fields are
 * accessible both on the host, where the lowest-level representation builds on
 * standard C++ types such as \c float or \c int (e.g. <tt>Point<float, 3></tt>),
 * and on the device, whose types invoke the JIT compiler
 * (e.g. <tt>Point<CUDAArray<float>, 3></tt>). Copying this data back and forth
 * can be costly if both host and device require simultaneous access. Even when
 * all code effectively runs on the host (e.g. in LLVM mode), accessing "LLVM
 * device" arrays still requires traversal of JIT compiler data structures,
 * which was a severe bottleneck, e.g., when Embree calls shape-specific
 * intersection routines.
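 *
 * A minimal usage sketch (illustrative only, not part of this header's
 * interface; the member \c m_intensity is hypothetical, while \c Float and
 * \c ScalarFloat denote a variant's JIT and scalar float types):
 *
 * \code
 * field<Float, ScalarFloat> m_intensity;        // device value + host mirror
 *
 * m_intensity = ScalarFloat(0.5f);              // updates both copies
 * Float       on_device = m_intensity.value();  // JIT/device array
 * ScalarFloat on_host   = m_intensity.scalar(); // plain host scalar
 * \endcode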
 */
template <typename DeviceType,
          typename HostType =
              std::decay_t<decltype(dr::slice(std::declval<DeviceType>()))>,
          typename SFINAE = int>
struct field {};
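
/// Specialization for scalar modes: \c DeviceType and \c HostType coincide,
/// so a single copy of the value suffices.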
template <typename DeviceType, typename HostType>
struct field<DeviceType, HostType,
             dr::enable_if_t<std::is_same_v<DeviceType, HostType>>> {
    field() {}
    field(const DeviceType &v) : m_scalar(v) { }
    field(DeviceType &&v) : m_scalar(v) { }

    const DeviceType& value()  const { return m_scalar; }
    const DeviceType& scalar() const { return m_scalar; }
    DeviceType* ptr() { return &m_scalar; }

    field& operator=(const field& f) {
        m_scalar = f.m_scalar;
        return *this;
    }

    field& operator=(field&& f) {
        m_scalar = std::move(f.m_scalar);
        return *this;
    }

    field& operator=(const DeviceType &v) {
        m_scalar = v;
        return *this;
    }

    field& operator=(DeviceType &&v) {
        m_scalar = v;
        return *this;
    }

private:
    DeviceType m_scalar;
};
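
/// Specialization for JIT (CUDA/LLVM) modes: stores the device-side Dr.Jit
/// array alongside a host-side scalar mirror that is kept in sync on
/// assignment.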
template <typename DeviceType, typename HostType>
struct field<DeviceType, HostType,
             dr::enable_if_t<!std::is_same_v<DeviceType, HostType>>> {
    field() {}
    field(const HostType &v) : m_value(v), m_scalar(v) { }
    field(HostType &&v) : m_value(v), m_scalar(v) { }

    const DeviceType& value()  const { return m_value; }
    const HostType&   scalar() const { return m_scalar; }
    DeviceType* ptr() { return &m_value; }

    field& operator=(const field& f) {
        m_value  = f.m_value;
        m_scalar = f.m_scalar;
        return *this;
    }

    field& operator=(field&& f) {
        m_value  = std::move(f.m_value);
        m_scalar = std::move(f.m_scalar);
        return *this;
    }

    field& operator=(const HostType &v) {
        m_value = m_scalar = v;
        return *this;
    }

    field& operator=(HostType &&v) {
        m_value = m_scalar = v;
        return *this;
    }

    field& operator=(const DeviceType &v) {
        m_value  = v;
        m_scalar = dr::slice<HostType>(m_value);
        return *this;
    }

    field& operator=(DeviceType &&v) {
        m_value  = v;
        m_scalar = dr::slice<HostType>(m_value);
        return *this;
    }

    bool schedule_force_() { return dr::detail::schedule_force(m_value); }

private:
    DeviceType m_value;
    HostType m_scalar;
};
/// Prints the canonical string representation of a field
template <typename DeviceType, typename HostType>
std::ostream &operator<<(std::ostream &os,
                         const field<DeviceType, HostType> &f) {
    os << f.scalar();
    return os;
}

NAMESPACE_END(mitsuba)