// vr_oculus.cpp - Oculus SDK example (forked from SaschaWillems/Vulkan)
#include <vr/vr_common.hpp>
#include <OVR_CAPI_Vk.h>

namespace ovr {

using TextureSwapChainDesc = ovrTextureSwapChainDesc;
using Session = ovrSession;
using HmdDesc = ovrHmdDesc;
using GraphicsLuid = ovrGraphicsLuid;
using TextureSwapChain = ovrTextureSwapChain;
using MirrorTexture = ovrMirrorTexture;
using EyeRenderDesc = ovrEyeRenderDesc;
using LayerEyeFov = ovrLayerEyeFov;
using ViewScaleDesc = ovrViewScaleDesc;
using Posef = ovrPosef;
using EyePoses = std::array<Posef, 2>;

//using EyeType = ovrEyeType;
enum class EyeType
{
    Left = ovrEye_Left,
    Right = ovrEye_Right
};

// Convenience method for looping over each eye with a lambda
template <typename Function>
inline void for_each_eye(Function function) {
    for (ovrEyeType eye = ovrEyeType::ovrEye_Left; eye < ovrEyeType::ovrEye_Count; eye = static_cast<ovrEyeType>(eye + 1)) {
        function(eye);
    }
}

inline mat4 toGlm(const ovrMatrix4f& om) {
    return glm::transpose(glm::make_mat4(&om.M[0][0]));
}

inline mat4 toGlm(const ovrFovPort& fovport, float nearPlane = 0.01f, float farPlane = 10000.0f) {
    return toGlm(ovrMatrix4f_Projection(fovport, nearPlane, farPlane, true));
}

inline vec3 toGlm(const ovrVector3f& ov) {
    return glm::make_vec3(&ov.x);
}

inline vec2 toGlm(const ovrVector2f& ov) {
    return glm::make_vec2(&ov.x);
}

inline uvec2 toGlm(const ovrSizei& ov) {
    return uvec2(ov.w, ov.h);
}

inline quat toGlm(const ovrQuatf& oq) {
    return glm::make_quat(&oq.x);
}

inline mat4 toGlm(const ovrPosef& op) {
    mat4 orientation = glm::mat4_cast(toGlm(op.Orientation));
    mat4 translation = glm::translate(mat4(), ovr::toGlm(op.Position));
    return translation * orientation;
}

inline std::array<glm::mat4, 2> toGlm(const EyePoses& eyePoses) {
    return std::array<glm::mat4, 2>{ toGlm(eyePoses[0]), toGlm(eyePoses[1]) };
}

inline ovrMatrix4f fromGlm(const mat4& m) {
    ovrMatrix4f result;
    mat4 transposed(glm::transpose(m));
    memcpy(result.M, &(transposed[0][0]), sizeof(float) * 16);
    return result;
}

inline ovrVector3f fromGlm(const vec3& v) {
    ovrVector3f result;
    result.x = v.x;
    result.y = v.y;
    result.z = v.z;
    return result;
}

inline ovrVector2f fromGlm(const vec2& v) {
    ovrVector2f result;
    result.x = v.x;
    result.y = v.y;
    return result;
}

inline ovrSizei fromGlm(const uvec2& v) {
    ovrSizei result;
    result.w = v.x;
    result.h = v.y;
    return result;
}

inline ovrQuatf fromGlm(const quat& q) {
    ovrQuatf result;
    result.x = q.x;
    result.y = q.y;
    result.z = q.z;
    result.w = q.w;
    return result;
}
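
// Illustrative sanity check (not part of the original sample): ovrQuatf and
// glm::quat both store x, y, z, w as floats, so a fromGlm/toGlm round trip
// should reproduce the input exactly. A check like this is handy when
// validating that the two libraries agree on quaternion memory layout.
inline bool quatRoundTripMatches(const quat& q) {
    const quat r = toGlm(fromGlm(q));
    return r.x == q.x && r.y == q.y && r.z == q.z && r.w == q.w;
}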
void OVR_CDECL logger(uintptr_t userData, int level, const char* message) {
    OutputDebugStringA("OVR_SDK: ");
    OutputDebugStringA(message);
    OutputDebugStringA("\n");
}

}  // namespace ovr
class OculusExample : public VrExample {
    using Parent = VrExample;

public:
    ovr::Session _session{};
    ovr::HmdDesc _hmdDesc{};
    ovr::GraphicsLuid _luid{};
    ovr::LayerEyeFov _sceneLayer;
    ovr::TextureSwapChain& _eyeTexture = _sceneLayer.ColorTexture[0];
    ovr::MirrorTexture _mirrorTexture;
    ovr::ViewScaleDesc _viewScaleDesc;
    ovrLayerHeader* headerList = &_sceneLayer.Header;
    vk::Semaphore blitComplete;
    std::vector<vk::CommandBuffer> oculusBlitCommands;
    std::vector<vk::CommandBuffer> mirrorBlitCommands;

    ~OculusExample() {
        // Shut down Oculus
        ovr_Destroy(_session);
        _session = nullptr;
        ovr_Shutdown();
    }

    void recenter() override { ovr_RecenterTrackingOrigin(_session); }
    void prepareOculus() {
        ovrInitParams initParams{ 0, OVR_MINOR_VERSION, ovr::logger, (uintptr_t)this, 0 };
        if (!OVR_SUCCESS(ovr_Initialize(&initParams))) {
            throw std::runtime_error("Unable to initialize Oculus SDK");
        }

        if (!OVR_SUCCESS(ovr_Create(&_session, &_luid))) {
            throw std::runtime_error("Unable to create HMD session");
        }

        _hmdDesc = ovr_GetHmdDesc(_session);
        _viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
        memset(&_sceneLayer, 0, sizeof(ovrLayerEyeFov));
        _sceneLayer.Header.Type = ovrLayerType_EyeFov;
        _sceneLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;

        ovr::for_each_eye([&](ovrEyeType eye) {
            ovrEyeRenderDesc erd = ovr_GetRenderDesc(_session, eye, _hmdDesc.DefaultEyeFov[eye]);
            ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(erd.Fov, 0.1f, 256.0f, ovrProjection_ClipRangeOpenGL);
            eyeProjections[eye] = ovr::toGlm(ovrPerspectiveProjection);
            _viewScaleDesc.HmdToEyePose[eye] = erd.HmdToEyePose;

            ovrFovPort& fov = _sceneLayer.Fov[eye] = erd.Fov;
            auto eyeSize = ovr_GetFovTextureSize(_session, eye, fov, 1.0f);
            _sceneLayer.Viewport[eye].Size = eyeSize;
            _sceneLayer.Viewport[eye].Pos = { (int)renderTargetSize.x, 0 };
            renderTargetSize.y = std::max(renderTargetSize.y, (uint32_t)eyeSize.h);
            renderTargetSize.x += eyeSize.w;
        });

        context.requireExtensions({ VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME });
        context.setDevicePicker([this](const std::vector<vk::PhysicalDevice>& devices) -> vk::PhysicalDevice {
            VkPhysicalDevice result;
            if (!OVR_SUCCESS(ovr_GetSessionPhysicalDeviceVk(_session, _luid, context.instance, &result))) {
                throw std::runtime_error("Unable to identify Vulkan device");
            }
            return result;
        });
    }
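
    // The per-eye loop above packs both eyes side by side into one shared render
    // target: the left eye's viewport starts at x = 0 and the right eye's at
    // x = <left eye width>, so renderTargetSize.x ends up as the sum of both eye
    // widths and renderTargetSize.y as the larger of the two eye heights.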
    void prepareOculusSwapchain() {
        ovrTextureSwapChainDesc desc = {};
        desc.Type = ovrTexture_2D;
        desc.ArraySize = 1;
        desc.Width = renderTargetSize.x;
        desc.Height = renderTargetSize.y;
        desc.MipLevels = 1;
        desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
        desc.SampleCount = 1;
        desc.StaticImage = ovrFalse;
        if (!OVR_SUCCESS(ovr_CreateTextureSwapChainVk(_session, context.device, &desc, &_eyeTexture))) {
            throw std::runtime_error("Unable to create swap chain");
        }

        int oculusSwapchainLength = 0;
        if (!OVR_SUCCESS(ovr_GetTextureSwapChainLength(_session, _eyeTexture, &oculusSwapchainLength)) || !oculusSwapchainLength) {
            throw std::runtime_error("Unable to count swap chain textures");
        }

        // Submission command buffers
        if (oculusBlitCommands.empty()) {
            vk::CommandBufferAllocateInfo cmdBufAllocateInfo;
            cmdBufAllocateInfo.commandPool = context.getCommandPool();
            cmdBufAllocateInfo.commandBufferCount = oculusSwapchainLength;
            oculusBlitCommands = context.device.allocateCommandBuffers(cmdBufAllocateInfo);
        }

        vk::ImageBlit sceneBlit;
        sceneBlit.dstSubresource.aspectMask = sceneBlit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
        sceneBlit.dstSubresource.layerCount = sceneBlit.srcSubresource.layerCount = 1;
        sceneBlit.dstOffsets[1] = sceneBlit.srcOffsets[1] = vk::Offset3D{ (int32_t)renderTargetSize.x, (int32_t)renderTargetSize.y, 1 };
        for (int i = 0; i < oculusSwapchainLength; ++i) {
            vk::CommandBuffer& cmdBuffer = oculusBlitCommands[i];
            VkImage oculusImage;
            if (!OVR_SUCCESS(ovr_GetTextureSwapChainBufferVk(_session, _eyeTexture, i, &oculusImage))) {
                throw std::runtime_error("Unable to acquire vulkan image for index " + std::to_string(i));
            }
            cmdBuffer.reset(vk::CommandBufferResetFlagBits::eReleaseResources);
            cmdBuffer.begin(vk::CommandBufferBeginInfo{});
            context.setImageLayout(cmdBuffer, oculusImage, vk::ImageAspectFlagBits::eColor, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal);
            cmdBuffer.blitImage(shapesRenderer->framebuffer.colors[0].image, vk::ImageLayout::eTransferSrcOptimal, oculusImage,
                                vk::ImageLayout::eTransferDstOptimal, sceneBlit, vk::Filter::eNearest);
            context.setImageLayout(cmdBuffer, oculusImage, vk::ImageAspectFlagBits::eColor, vk::ImageLayout::eTransferDstOptimal,
                                   vk::ImageLayout::eTransferSrcOptimal);
            cmdBuffer.end();
        }
    }
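
    // Note that one blit command buffer is recorded per Oculus swap chain image up
    // front; render() then only has to submit the pre-recorded buffer selected by
    // ovr_GetTextureSwapChainCurrentIndex each frame, instead of re-recording.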
    void prepareOculusMirror() {
        // Mirroring command buffers
        ovrMirrorTextureDesc mirrorDesc;
        memset(&mirrorDesc, 0, sizeof(mirrorDesc));
        mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
        mirrorDesc.Width = size.x;
        mirrorDesc.Height = size.y;
        if (!OVR_SUCCESS(ovr_CreateMirrorTextureWithOptionsVk(_session, context.device, &mirrorDesc, &_mirrorTexture))) {
            throw std::runtime_error("Could not create mirror texture");
        }

        VkImage mirrorImage;
        if (!OVR_SUCCESS(ovr_GetMirrorTextureBufferVk(_session, _mirrorTexture, &mirrorImage))) {
            throw std::runtime_error("Could not acquire mirror texture image");
        }

        if (mirrorBlitCommands.empty()) {
            vk::CommandBufferAllocateInfo cmdBufAllocateInfo;
            cmdBufAllocateInfo.commandPool = context.getCommandPool();
            cmdBufAllocateInfo.commandBufferCount = swapchain.imageCount;
            mirrorBlitCommands = context.device.allocateCommandBuffers(cmdBufAllocateInfo);
        }

        vk::ImageBlit mirrorBlit;
        mirrorBlit.dstSubresource.aspectMask = mirrorBlit.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
        mirrorBlit.dstSubresource.layerCount = mirrorBlit.srcSubresource.layerCount = 1;
        mirrorBlit.srcOffsets[1] = mirrorBlit.dstOffsets[1] = { (int32_t)size.x, (int32_t)size.y, 1 };
        for (size_t i = 0; i < swapchain.imageCount; ++i) {
            vk::CommandBuffer& cmdBuffer = mirrorBlitCommands[i];
            cmdBuffer.reset(vk::CommandBufferResetFlagBits::eReleaseResources);
            cmdBuffer.begin(vk::CommandBufferBeginInfo{});
            context.setImageLayout(cmdBuffer, swapchain.images[i].image, vk::ImageAspectFlagBits::eColor, vk::ImageLayout::eUndefined,
                                   vk::ImageLayout::eTransferDstOptimal);
            cmdBuffer.blitImage(mirrorImage, vk::ImageLayout::eTransferSrcOptimal, swapchain.images[i].image, vk::ImageLayout::eTransferDstOptimal, mirrorBlit,
                                vk::Filter::eNearest);
            context.setImageLayout(cmdBuffer, swapchain.images[i].image, vk::ImageAspectFlagBits::eColor, vk::ImageLayout::eTransferDstOptimal,
                                   vk::ImageLayout::ePresentSrcKHR);
            cmdBuffer.end();
        }
    }
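
    // The mirror texture is a single runtime-owned image, so a blit from it into
    // each window swap chain image can be recorded once here; render() later submits
    // whichever pre-recorded buffer matches the acquired swap chain index.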
    // Setup any Oculus specific work that requires an existing Vulkan instance/device/queue
    void prepareOculusVk() {
        // Note: "Synchonization" (sic) is the spelling used by this version of OVR_CAPI_Vk.h
        ovr_SetSynchonizationQueueVk(_session, context.queue);
        prepareOculusSwapchain();
        prepareOculusMirror();
        blitComplete = context.device.createSemaphore({});
    }
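
    // Per-frame call order used by update() and render() below, following the
    // OVR_CAPI frame submission model:
    //
    //   ovr_WaitToBeginFrame(session, frameIndex);      // throttle to the compositor
    //   ovr_BeginFrame(session, frameIndex);            // mark the start of rendering
    //   ... render the scene, blit into the swap chain image ...
    //   ovr_CommitTextureSwapChain(session, chain);     // publish the rendered image
    //   ovr_EndFrame(session, frameIndex, ...);         // hand the layer to the compositor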
    void prepare() override {
        prepareOculus();
        // FIXME the Oculus API hangs if validation is enabled
        // context.setValidationEnabled(true);
        Parent::prepare();
        prepareOculusVk();
    }
    void update(float delta) override {
        ovrSessionStatus status;
        ovrResult result = ovr_GetSessionStatus(_session, &status);
        if (!OVR_SUCCESS(result)) {
            throw std::runtime_error("Can't get session status");
        }

        // Poll until the headset is visible and being worn before rendering
        while (!status.IsVisible || !status.HmdMounted) {
            result = ovr_GetSessionStatus(_session, &status);
            if (!OVR_SUCCESS(result)) {
                throw std::runtime_error("Can't get session status");
            }
            Sleep(100);
        }

        ovr_WaitToBeginFrame(_session, frameCounter);
        ovr::EyePoses eyePoses;
        ovr_GetEyePoses(_session, frameCounter, true, _viewScaleDesc.HmdToEyePose, eyePoses.data(), &_sceneLayer.SensorSampleTime);
        eyeViews = std::array<glm::mat4, 2>{ glm::inverse(ovr::toGlm(eyePoses[0])), glm::inverse(ovr::toGlm(eyePoses[1])) };

        ovr::for_each_eye([&](ovrEyeType eye) { _sceneLayer.RenderPose[eye] = eyePoses[eye]; });

        Parent::update(delta);
    }
    void render() override {
        vk::Fence submitFence = swapchain.getSubmitFence(true);
        auto swapchainAcquireResult = swapchain.acquireNextImage(shapesRenderer->semaphores.renderStart);
        auto swapchainIndex = swapchainAcquireResult.value;

        ovrResult result = ovr_BeginFrame(_session, frameCounter);
        shapesRenderer->render();

        int oculusIndex;
        result = ovr_GetTextureSwapChainCurrentIndex(_session, _eyeTexture, &oculusIndex);
        if (!OVR_SUCCESS(result)) {
            throw std::runtime_error("Unable to acquire next texture index");
        }

        // Blit from our framebuffer to the Oculus output image (pre-recorded command buffer)
        context.submit(oculusBlitCommands[oculusIndex], { { shapesRenderer->semaphores.renderComplete, vk::PipelineStageFlagBits::eColorAttachmentOutput } });

        // The lack of explicit synchronization here is baffling. One of these calls must be
        // blocking, meaning there would have to be some backend use of waitIdle or fences,
        // meaning less optimal performance than with semaphores.
        result = ovr_CommitTextureSwapChain(_session, _eyeTexture);
        if (!OVR_SUCCESS(result)) {
            throw std::runtime_error("Unable to commit swap chain for index " + std::to_string(oculusIndex));
        }

        result = ovr_EndFrame(_session, frameCounter, &_viewScaleDesc, &headerList, 1);
        if (!OVR_SUCCESS(result)) {
            throw std::runtime_error("Unable to submit frame for index " + std::to_string(oculusIndex));
        }

        // Blit from the mirror buffer to the swap chain image.
        // Technically this could be combined with the other submit above, which blits the
        // framebuffer to the texture, but there's no real way of knowing when the mirror
        // image is properly populated. Presumably it's reliable here because of the blocking
        // behavior of ovr_EndFrame (or ovr_CommitTextureSwapChain).
        context.submit(mirrorBlitCommands[swapchainIndex], {}, {}, blitComplete, submitFence);
        swapchain.queuePresent(blitComplete);
    }
    std::string getWindowTitle() {
        std::string device(context.deviceProperties.deviceName);
        return "Oculus SDK Example " + device + " - " + std::to_string((int)lastFPS) + " fps";
    }
};

RUN_EXAMPLE(OculusExample)