diff --git a/.github/workflows/wcap.yml b/.github/workflows/wcap.yml index ab143e1..9967d79 100644 --- a/.github/workflows/wcap.yml +++ b/.github/workflows/wcap.yml @@ -3,8 +3,12 @@ name: wcap on: push: branches: main + paths-ignore: + - '**/README.md' pull_request: branches: main + paths-ignore: + - '**/README.md' jobs: build: @@ -12,26 +16,31 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: repo - - name: Build + - name: Build (x64) shell: cmd - run: cd repo && build.cmd + run: cd repo && build.cmd x64 + + - name: Build (arm64) + shell: cmd + run: cd repo && build.cmd arm64 - name: Checkout wiki - uses: actions/checkout@v3 + uses: actions/checkout@v4 if: ${{ github.event_name == 'push' && github.ref_name == 'main' }} with: repository: ${{github.repository}}.wiki path: wiki - - name: Upload binary + - name: Upload binaries if: ${{ github.event_name == 'push' && github.ref_name == 'main' }} shell: cmd run: | - copy repo\wcap.exe wiki + copy repo\wcap-x64.exe wiki + copy repo\wcap-arm64.exe wiki cd wiki git config --local user.email "action@github.com" diff --git a/README.md b/README.md index 8cadf45..07d4e43 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ wcap Simple and efficient screen recording utility for Windows. -Get latest binary here: [wcap.exe][] +Get latest binary here: [wcap-x64.exe][] or [wcap-arm64.exe][] **WARNING**: Windows Defender or other AV software might report false positive detection @@ -36,12 +36,12 @@ little CPU and memory. You can choose in settings to capture only client area or full size of window - client area will not include title bar and borders for standard windows style. Recorded video size is determined by initial window size. -Make sure your GPU drivers are updated if something is not working with hardware video encoding - by default hardware encoder -is enabled, you can disable it in settings. 
Then video will be encoded using [Microsoft Media Foundation H264][MSMFH264] +By default hardware encoder is enabled, you can disable it in settings. Make sure your GPU drivers are updated if something is +not working with hardware video encoding. Then video will be encoded using [Microsoft Media Foundation H264][MSMFH264] software encoder. You might want to explicitly use software encoder on older GPU's as their hardware encoder quality is not great. Audio is captured using [WASAPI loopback recording][] and encoded using [Microsoft Media Foundation AAC][MSMFAAC] encoder, or -undocumented Media Foundation FLAC encoder (it seems it always is present in Windows 10). +undocumented Media Foundation FLAC encoder (it seems it always is present in Windows 10 and 11). Recorded mp4 file can be set to use fragmented mp4 format in settings (only for H264 codec). Fragmented mp4 file does not require "finalizing" it. Which means that in case application or GPU driver crashes or if you run out of disk space then @@ -62,7 +62,7 @@ HEVC Software Encoding ====================== HEVC encoding in software (on CPU) will require installing HEVC Video Extensions from Windows Store. It will support only -8-bit encoding. You can get direct download to installer package without using Windows Store with following steps: +8-bit encoding. You can get direct download to installer package without using Windows Store application with following steps: 1) open https://store.rg-adguard.net/ 2) search `https://www.microsoft.com/store/productId/9n4wgh0z6vhq` for `Retail` channel @@ -96,7 +96,8 @@ This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. 
-[wcap.exe]: https://raw.githubusercontent.com/wiki/mmozeiko/wcap/wcap.exe +[wcap-x64.exe]: https://raw.githubusercontent.com/wiki/mmozeiko/wcap/wcap-x64.exe +[wcap-arm64.exe]: https://raw.githubusercontent.com/wiki/mmozeiko/wcap/wcap-arm64.exe [wgc]: https://blogs.windows.com/windowsdeveloper/2019/09/16/new-ways-to-do-screen-capture/ [MSMFH264]: https://docs.microsoft.com/en-us/windows/win32/medfound/h-264-video-encoder [VS]: https://visualstudio.microsoft.com/vs/ diff --git a/build.cmd b/build.cmd index 1bdbae9..e7ff8aa 100644 --- a/build.cmd +++ b/build.cmd @@ -1,31 +1,47 @@ @echo off setlocal enabledelayedexpansion +if "%PROCESSOR_ARCHITECTURE%" equ "AMD64" ( + set HOST_ARCH=x64 +) else if "%PROCESSOR_ARCHITECTURE%" equ "ARM64" ( + set HOST_ARCH=arm64 +) + +set ARGS=%* +if "%ARGS%" equ "" set ARGS=%HOST_ARCH% + +if "%ARGS:x64=%" neq "!ARGS!" ( + set TARGET_ARCH=x64 +) else if "%ARGS:arm64=%" neq "!ARGS!" ( + set TARGET_ARCH=arm64 +) else ( + set TARGET_ARCH=%HOST_ARCH% +) + where /Q cl.exe || ( set __VSCMD_ARG_NO_LOGO=1 for /f "tokens=*" %%i in ('"C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" -latest -requires Microsoft.VisualStudio.Workload.NativeDesktop -property installationPath') do set VS=%%i if "!VS!" equ "" ( echo ERROR: Visual Studio installation not found exit /b 1 - ) - call "!VS!\VC\Auxiliary\Build\vcvarsall.bat" amd64 || exit /b 1 -) - -if "%VSCMD_ARG_TGT_ARCH%" neq "x64" ( - echo ERROR: please run this from MSVC x64 native tools command prompt, 32-bit target is not supported! - exit /b 1 + ) + call "!VS!\Common7\Tools\VsDevCmd.bat" -arch=%TARGET_ARCH% -host_arch=%HOST_ARCH% -startdir=none -no_logo || exit /b 1 ) -if "%1" equ "debug" ( - set CL=/MTd /Od /Zi /D_DEBUG /RTC1 /Fdwcap.pdb /fsanitize=address +if "%ARGS:debug=%" neq "%ARGS%" ( + set CL=/MTd /Od /Z7 /D_DEBUG /RTC1 set LINK=/DEBUG set FXC=/Od /Zi + if "%TARGET_ARCH%" equ "x64" set CL=!CL! 
/fsanitize=address ) else ( set CL=/GL /O1 /Oi /DNDEBUG /GS- set LINK=/LTCG /OPT:REF /OPT:ICF ucrt.lib libvcruntime.lib set FXC=/O3 /Qstrip_reflect /Qstrip_debug /Qstrip_priv ) +if "%TARGET_ARCH%" equ "arm64" set CL=%CL% /arch:armv8.1 +if "%TARGET_ARCH%" equ "x64" set LINK=%LINK% /FIXED /merge:_RDATA=.rdata + call :fxc ResizePassH || exit /b 1 call :fxc ResizePassV || exit /b 1 call :fxc ResizeLinearPassH || exit /b 1 @@ -37,7 +53,7 @@ call :fxc ConvertPass2 || exit /b 1 for /f %%i in ('call git describe --always --dirty') do set CL=%CL% -DWCAP_GIT_INFO=\"%%i\" rc.exe /nologo wcap.rc || exit /b 1 -cl.exe /nologo /W3 /WX wcap.c wcap.res /link /INCREMENTAL:NO /MANIFEST:EMBED /MANIFESTINPUT:wcap.manifest /SUBSYSTEM:WINDOWS /FIXED /merge:_RDATA=.rdata || exit /b 1 +cl.exe /nologo /std:c11 /experimental:c11atomics /W3 /WX wcap.c wcap.res /Fewcap-%TARGET_ARCH%.exe /link /INCREMENTAL:NO /MANIFEST:EMBED /MANIFESTINPUT:wcap.manifest /SUBSYSTEM:WINDOWS || exit /b 1 del *.obj *.res >nul goto :eof diff --git a/wcap.c b/wcap.c index b3d4c97..4bf61ce 100644 --- a/wcap.c +++ b/wcap.c @@ -37,8 +37,10 @@ #pragma comment (lib, "OneCore") #pragma comment (lib, "CoreMessaging") +#if defined(_M_AMD64) // this is needed to be able to use Nvidia Media Foundation encoders on Optimus systems __declspec(dllexport) DWORD NvOptimusEnablement = 1; +#endif #define WM_WCAP_ALREADY_RUNNING (WM_USER+1) #define WM_WCAP_STOP_CAPTURE (WM_USER+2) diff --git a/wcap.h b/wcap.h index acf1cc4..a38871e 100644 --- a/wcap.h +++ b/wcap.h @@ -12,6 +12,7 @@ #include #include #include +#include #define WCAP_TITLE L"wcap" #define WCAP_URL L"https://github.com/mmozeiko/wcap" diff --git a/wcap_audio_capture.h b/wcap_audio_capture.h index e05e6e6..8f94c71 100644 --- a/wcap_audio_capture.h +++ b/wcap_audio_capture.h @@ -25,8 +25,8 @@ typedef struct uint8_t* Buffer; uint32_t BufferSize; - uint32_t BufferRead; - uint32_t BufferWrite; + _Atomic(uint32_t) BufferRead; + _Atomic(uint32_t) BufferWrite; } AudioCapture; @@ 
-81,7 +81,7 @@ DEFINE_GUID(IID_IActivateAudioInterfaceCompletionHandler, 0x41d949ab, 0x9862, 0x typedef struct { IActivateAudioInterfaceCompletionHandler Handler; - uint32_t ReadyFlag; + _Atomic(uint32_t) ReadyFlag; } AudioCaptureActivate; @@ -115,8 +115,8 @@ static HRESULT STDMETHODCALLTYPE AudioCaptureActivate__ActivateCompleted(IActiva { AudioCaptureActivate* Activate = CONTAINING_RECORD(This, AudioCaptureActivate, Handler); - InterlockedIncrement(&Activate->ReadyFlag); - WakeByAddressSingle(&Activate->ReadyFlag); + atomic_store_explicit(&Activate->ReadyFlag, 1, memory_order_release); + WakeByAddressSingle((PVOID)&Activate->ReadyFlag); return S_OK; } @@ -157,7 +157,7 @@ static DWORD CALLBACK AudioCapture__Thread(LPVOID Arg) UINT64 Timestamp = 0; // in QPC unuts while (SUCCEEDED(IAudioCaptureClient_GetBuffer(CaptureClient, &Buffer, &Frames, &Flags, &Position, &Timestamp)) && Frames != 0) { - uint32_t BufferAvailable = BufferSize - (BufferWrite - Capture->BufferRead); + uint32_t BufferAvailable = BufferSize - (BufferWrite - atomic_load_explicit(&Capture->BufferRead, memory_order_relaxed)); uint32_t WriteSize = sizeof(Frames) + sizeof(Position) + sizeof(Timestamp) + Frames * BytesPerFrame; if (WriteSize <= BufferAvailable) @@ -174,7 +174,9 @@ static DWORD CALLBACK AudioCapture__Thread(LPVOID Arg) { CopyMemory(BufferPtr, Buffer, Frames * BytesPerFrame); } - BufferWrite = InterlockedAdd(&Capture->BufferWrite, WriteSize); + + BufferWrite += WriteSize; + atomic_store_explicit(&Capture->BufferWrite, BufferWrite, memory_order_release); } else { @@ -225,6 +227,7 @@ bool AudioCapture_Start(AudioCapture* Capture, HWND ApplicationWindow) { .Handler.lpVtbl = &AudioCaptureActivateVtbl, }; + atomic_init(&ActivateCompletion.ReadyFlag, 0); IActivateAudioInterfaceAsyncOperation* AsyncOperation; if (FAILED(ActivateAudioInterfaceAsync(VIRTUAL_AUDIO_DEVICE_PROCESS_LOOPBACK, &IID_IAudioClient, &Params, &ActivateCompletion.Handler, &AsyncOperation))) @@ -232,10 +235,10 @@ bool 
AudioCapture_Start(AudioCapture* Capture, HWND ApplicationWindow) return false; } - while (!ActivateCompletion.ReadyFlag) + while (!atomic_load_explicit(&ActivateCompletion.ReadyFlag, memory_order_acquire)) { uint32_t ReadyFlag = 0; - WaitOnAddress(&ActivateCompletion.ReadyFlag, &ReadyFlag, sizeof(ReadyFlag), INFINITE); + WaitOnAddress((PVOID)&ActivateCompletion.ReadyFlag, &ReadyFlag, sizeof(ReadyFlag), INFINITE); } HRESULT ActivateResult; @@ -375,8 +378,8 @@ bool AudioCapture_Start(AudioCapture* Capture, HWND ApplicationWindow) Capture->Buffer = View1; Capture->BufferSize = BufferSize; - Capture->BufferRead = 0; - Capture->BufferWrite = 0; + atomic_init(&Capture->BufferRead, 0); + atomic_init(&Capture->BufferWrite, 0); Capture->Stop = false; @@ -438,13 +441,14 @@ bool AudioCapture_GetData(AudioCapture* Capture, AudioCaptureData* Data, uint64_ uint64_t Position; uint64_t Timestamp; - uint32_t AvailableSize = Capture->BufferWrite - Capture->BufferRead; + uint32_t BufferRead = atomic_load_explicit(&Capture->BufferRead, memory_order_relaxed); + uint32_t AvailableSize = atomic_load_explicit(&Capture->BufferWrite, memory_order_acquire) - BufferRead; if (AvailableSize < sizeof(Frames) + sizeof(Position) + sizeof(Timestamp)) { return false; } - uint8_t* BufferPtr = Capture->Buffer + (Capture->BufferRead & (Capture->BufferSize - 1)); + uint8_t* BufferPtr = Capture->Buffer + (BufferRead & (Capture->BufferSize - 1)); CopyMemory(&Frames, BufferPtr, sizeof(Frames)); BufferPtr += sizeof(Frames); CopyMemory(&Position, BufferPtr, sizeof(Position)); BufferPtr += sizeof(Position); CopyMemory(&Timestamp, BufferPtr, sizeof(Timestamp)); BufferPtr += sizeof(Timestamp); @@ -490,6 +494,6 @@ bool AudioCapture_GetData(AudioCapture* Capture, AudioCaptureData* Data, uint64_ void AudioCapture_ReleaseData(AudioCapture* Capture, AudioCaptureData* Data) { uint32_t ReadSize = (uint32_t)(sizeof(uint32_t) + sizeof(uint64_t) + sizeof(uint64_t) + Data->Count * Capture->Format->nBlockAlign); - 
Assert(ReadSize <= Capture->BufferWrite - Capture->BufferRead); - InterlockedAdd(&Capture->BufferRead, ReadSize); + Assert(ReadSize <= atomic_load_explicit(&Capture->BufferWrite, memory_order_relaxed) - atomic_load_explicit(&Capture->BufferRead, memory_order_relaxed)); + atomic_fetch_add_explicit(&Capture->BufferRead, ReadSize, memory_order_release); } diff --git a/wcap_encoder.h b/wcap_encoder.h index be73452..9b81a68 100644 --- a/wcap_encoder.h +++ b/wcap_encoder.h @@ -39,16 +39,16 @@ typedef struct TexResize Resize; YuvConvert Convert; - YuvConvertOutput ConvertOutput[ENCODER_VIDEO_BUFFER_COUNT]; - IMFSample* VideoSample[ENCODER_VIDEO_BUFFER_COUNT]; - uint64_t VideoSampleAvailable; + YuvConvertOutput ConvertOutput[ENCODER_VIDEO_BUFFER_COUNT]; + IMFSample* VideoSample[ENCODER_VIDEO_BUFFER_COUNT]; + _Atomic(uint64_t) VideoSampleAvailable; BOOL VideoDiscontinuity; UINT64 VideoLastTime; - IMFTransform* Resampler; - IMFSample* AudioSample[ENCODER_AUDIO_BUFFER_COUNT]; - uint64_t AudioSampleAvailable; + IMFTransform* Resampler; + IMFSample* AudioSample[ENCODER_AUDIO_BUFFER_COUNT]; + _Atomic(uint64_t) AudioSampleAvailable; IMFSample* AudioInputSample; DWORD AudioFrameSize; @@ -136,8 +136,8 @@ static HRESULT STDMETHODCALLTYPE Encoder__VideoInvoke(IMFAsyncCallback* this, IM { if (Sample == Enc->VideoSample[Index]) { - _InterlockedOr64(&Enc->VideoSampleAvailable, 1ULL << Index); - WakeByAddressSingle(&Enc->VideoSampleAvailable); + atomic_fetch_or(&Enc->VideoSampleAvailable, 1ULL << Index); + WakeByAddressSingle((PVOID)&Enc->VideoSampleAvailable); break; } } @@ -161,8 +161,8 @@ static HRESULT STDMETHODCALLTYPE Encoder__AudioInvoke(IMFAsyncCallback* this, IM { if (Sample == Enc->AudioSample[Index]) { - _InterlockedOr64(&Enc->AudioSampleAvailable, 1ULL << Index); - WakeByAddressSingle(&Enc->AudioSampleAvailable); + atomic_fetch_or(&Enc->AudioSampleAvailable, 1ULL << Index); + WakeByAddressSingle((PVOID)&Enc->AudioSampleAvailable); break; } } @@ -193,12 +193,12 @@ static void 
Encoder__OutputAudioSamples(Encoder* Encoder) for (;;) { // we don't want to drop any audio frames, so wait for available sample/buffer - uint64_t Available = Encoder->AudioSampleAvailable; + uint64_t Available = atomic_load(&Encoder->AudioSampleAvailable); while (Available == 0) { uint64_t Zero = 0; - WaitOnAddress(&Encoder->AudioSampleAvailable, &Zero, sizeof(Zero), INFINITE); - Available = Encoder->AudioSampleAvailable; + WaitOnAddress((PVOID)&Encoder->AudioSampleAvailable, &Zero, sizeof(Zero), INFINITE); + Available = atomic_load(&Encoder->AudioSampleAvailable); } DWORD Index; @@ -216,7 +216,7 @@ static void Encoder__OutputAudioSamples(Encoder* Encoder) } Assert(SUCCEEDED(hr)); - _InterlockedAnd64(&Encoder->AudioSampleAvailable, ~(1ULL << Index)); + atomic_fetch_and(&Encoder->AudioSampleAvailable, ~(1ULL << Index)); IMFTrackedSample* Tracked; HR(IMFSample_QueryInterface(Sample, &IID_IMFTrackedSample, (LPVOID*)&Tracked)); @@ -363,6 +363,64 @@ BOOL Encoder_Start(Encoder* Encoder, ID3D11Device* Device, LPWSTR FileName, cons Profile = eAVEncH265VProfile_Main_420_10; } + // make sure MFT video encoder exists, some vendors wrongly allow SinkWriter to be created for invalid configuration + { + bool Ok = false; + + MFT_REGISTER_TYPE_INFO InputType = { MFMediaType_Video, *MediaFormatYUV }; + MFT_REGISTER_TYPE_INFO OutputType = { MFMediaType_Video, *Codec }; + + IMFAttributes* EnumAttributes; + HR(MFCreateAttributes(&EnumAttributes, 1)); + + UINT32 Flags = MFT_ENUM_FLAG_SORTANDFILTER; + + if (Config->Config->HardwareEncoder) + { + IDXGIDevice* DxgiDevice; + HR(ID3D11Device_QueryInterface(Device, &IID_IDXGIDevice, (void**)&DxgiDevice)); + + IDXGIAdapter* DxgiAdapter; + HR(IDXGIDevice_GetAdapter(DxgiDevice, &DxgiAdapter)); + IDXGIDevice_Release(DxgiDevice); + + DXGI_ADAPTER_DESC AdapterDesc; + IDXGIAdapter_GetDesc(DxgiAdapter, &AdapterDesc); + IDXGIAdapter_Release(DxgiAdapter); + + HR(IMFAttributes_SetBlob(EnumAttributes, &MFT_ENUM_ADAPTER_LUID, 
(UINT8*)&AdapterDesc.AdapterLuid, sizeof(AdapterDesc.AdapterLuid))); + + Flags |= MFT_ENUM_FLAG_ASYNCMFT | MFT_ENUM_FLAG_HARDWARE; + } + else + { + Flags |= MFT_ENUM_FLAG_SYNCMFT; + } + + UINT32 ActivateCount = 0; + IMFActivate** Activate = NULL; + if (SUCCEEDED(MFTEnum2(MFT_CATEGORY_VIDEO_ENCODER, Flags, &InputType, &OutputType, EnumAttributes, &Activate, &ActivateCount)) && ActivateCount != 0) + { + Ok = true; + } + + if (Activate) + { + for (size_t ActivateIndex = 0; ActivateIndex != ActivateCount; ActivateIndex++) + { + IMFActivate_Release(Activate[ActivateIndex]); + } + CoTaskMemFree(Activate); + } + IMFAttributes_Release(EnumAttributes); + + if (!Ok) + { + MessageBoxW(NULL, L"Cannot find video encoder!", WCAP_TITLE, MB_ICONERROR); + goto bail; + } + } + // https://github.com/mpv-player/mpv/blob/release/0.38/video/csputils.c#L150-L153 bool IsHD = (OutputWidth >= 1280) || (OutputHeight > 576) || Config->Config->VideoCodec == CONFIG_VIDEO_H265; @@ -579,9 +637,6 @@ BOOL Encoder_Start(Encoder* Encoder, ID3D11Device* Device, LPWSTR FileName, cons YuvColorSpace ColorSpace = IsHD ? 
YuvColorSpace_BT709 : YuvColorSpace_BT601; YuvConvert_Create(&Encoder->Convert, Device, Encoder->Resize.OutputTexture, OutputWidth, OutputHeight, ColorSpace, Config->Config->ImprovedColorConversion); - UINT32 Size; - HR(MFCalculateImageSize(MediaFormatYUV, OutputWidth, OutputHeight, &Size)); - for (size_t OutputIndex = 0; OutputIndex < ENCODER_VIDEO_BUFFER_COUNT; OutputIndex++) { YuvConvertOutput_Create(&Encoder->ConvertOutput[OutputIndex], Device, OutputWidth, OutputHeight, ConvertFormat); @@ -593,7 +648,10 @@ BOOL Encoder_Start(Encoder* Encoder, ID3D11Device* Device, LPWSTR FileName, cons IMFMediaBuffer* Buffer; HR(MFCreateDXGISurfaceBuffer(&IID_ID3D11Texture2D, (IUnknown*)ConvertOutputTexture, 0, FALSE, &Buffer)); - HR(IMFMediaBuffer_SetCurrentLength(Buffer, Size)); + UINT32 MaxLength; + HR(IMFMediaBuffer_GetMaxLength(Buffer, &MaxLength)); + HR(IMFMediaBuffer_SetCurrentLength(Buffer, MaxLength)); + HR(IMFSample_AddBuffer(VideoSample, Buffer)); IMFMediaBuffer_Release(Buffer); @@ -611,7 +669,7 @@ BOOL Encoder_Start(Encoder* Encoder, ID3D11Device* Device, LPWSTR FileName, cons Encoder->VideoLastTime = 0x8000000000000000ULL; // some large time in future Assert(ENCODER_VIDEO_BUFFER_COUNT <= 64); - Encoder->VideoSampleAvailable = (1ULL << ENCODER_VIDEO_BUFFER_COUNT) - 1; + atomic_init(&Encoder->VideoSampleAvailable, (1ULL << ENCODER_VIDEO_BUFFER_COUNT) - 1); if (Encoder->AudioStreamIndex >= 0) { @@ -640,7 +698,7 @@ BOOL Encoder_Start(Encoder* Encoder, ID3D11Device* Device, LPWSTR FileName, cons Encoder->Resampler = Resampler; Assert(ENCODER_AUDIO_BUFFER_COUNT <= 64); - Encoder->AudioSampleAvailable = (1ULL << ENCODER_AUDIO_BUFFER_COUNT) - 1; + atomic_init(&Encoder->AudioSampleAvailable, (1ULL << ENCODER_AUDIO_BUFFER_COUNT) - 1); } ID3D11DeviceContext_AddRef(Context); @@ -709,7 +767,7 @@ BOOL Encoder_NewFrame(Encoder* Encoder, ID3D11Texture2D* Texture, RECT Rect, UIN { Encoder->VideoLastTime = Time; - uint64_t Available = Encoder->VideoSampleAvailable; + uint64_t 
Available = atomic_load(&Encoder->VideoSampleAvailable); if (Available == 0) { // dropped frame @@ -721,7 +779,7 @@ BOOL Encoder_NewFrame(Encoder* Encoder, ID3D11Texture2D* Texture, RECT Rect, UIN DWORD Index; _BitScanForward64(&Index, Available); - _InterlockedAnd64(&Encoder->VideoSampleAvailable, ~(1ULL << Index)); + atomic_fetch_and(&Encoder->VideoSampleAvailable, ~(1ULL << Index)); ID3D11DeviceContext* Context = Encoder->Context; ID3D11Multithread_Enter(Encoder->Multithread);