1use std::ptr::null_mut;
2
3use crossbeam::channel::{bounded, Receiver, Sender};
4use cust::{
5 prelude::Context,
6 sys::{cuCtxSetCurrent, cuMemcpy2D_v2, CUDA_MEMCPY2D_v2, CUdeviceptr, CUmemorytype, CUresult},
7};
8use ffmpeg_next::{
9 self as ffmpeg,
10 ffi::{
11 av_buffer_ref, av_buffer_unref, av_hwdevice_ctx_alloc, av_hwdevice_ctx_init,
12 av_hwframe_ctx_init, av_hwframe_get_buffer, AVHWDeviceContext, AVHWFramesContext,
13 AVPixelFormat,
14 },
15 Rational,
16};
17use pipewire as pw;
18
19use crate::{
20 encoders::video::{PipewireSPA, ProcessingThread, VideoEncoder},
21 types::{
22 config::QualityPreset,
23 error::{Result, WaycapError},
24 video_frame::{EncodedVideoFrame, RawVideoFrame},
25 },
26 utils::{extract_dmabuf_planes, TIME_UNIT_NS},
27};
28
29use super::{
30 cuda::AVCUDADeviceContext,
31 video::{create_hw_frame_ctx, GOP_SIZE},
32};
33
34#[cfg(feature = "vulkan")]
36use std::os::unix::io::RawFd;
37#[cfg(feature = "vulkan")]
38use cust::{external::ExternalMemory, memory::DevicePointer};
39#[cfg(feature = "vulkan")]
40use crate::waycap_vulkan::VulkanContext;
41
42#[cfg(feature = "egl")]
44use cust::sys::{
45 cuGraphicsMapResources, cuGraphicsResourceSetMapFlags_v2,
46 cuGraphicsSubResourceGetMappedArray, cuGraphicsUnmapResources, cuGraphicsUnregisterResource,
47 CUarray, CUgraphicsResource,
48};
49#[cfg(feature = "egl")]
50use khronos_egl::Image;
51#[cfg(feature = "egl")]
52use crate::waycap_egl::EglContext;
53#[cfg(feature = "egl")]
54use super::cuda::cuGraphicsGLRegisterImage;
55
/// DRM format modifiers advertised to PipeWire for dma-buf negotiation.
///
/// NOTE(review): decoded to hex these are NVIDIA-vendor (0x03) modifiers:
/// 0x0300000000606010..=0x0300000000606015 and
/// 0x0300000000E08010..=0x0300000000E08015 (block-linear layouts, presumably),
/// plus 0x00FFFFFFFFFFFFFF == DRM_FORMAT_MOD_INVALID as a fallback that lets
/// the producer pick an implicit modifier — confirm against drm_fourcc.h.
const NVIDIA_MODIFIERS: &[i64] = &[
    216172782120099856,
    216172782120099857,
    216172782120099858,
    216172782120099859,
    216172782120099860,
    216172782120099861,
    216172782128496656,
    216172782128496657,
    216172782128496658,
    216172782128496659,
    216172782128496660,
    216172782128496661,
    72057594037927935,
];
74
/// H.264 encoder backed by NVIDIA NVENC through ffmpeg's `h264_nvenc`.
///
/// Captured dma-buf frames are staged into CUDA device memory — via a Vulkan
/// persistent buffer (`vulkan` feature) or a GL texture registered with CUDA
/// (`egl` feature) — and then submitted to the hardware encoder.
pub struct NvencEncoder {
    /// Open ffmpeg encoder; `None` between `drop_processor()` and `reset()`.
    encoder: Option<ffmpeg::codec::encoder::Video>,
    width: u32,
    height: u32,
    /// Codec name used to (re)create the encoder (e.g. "h264_nvenc").
    encoder_name: String,
    quality: QualityPreset,
    /// Receiver handed out to consumers of encoded frames.
    encoded_frame_recv: Option<Receiver<EncodedVideoFrame>>,
    /// Bounded sender; full/disconnected sends are logged and dropped.
    encoded_frame_sender: Sender<EncodedVideoFrame>,

    /// CUDA context shared with ffmpeg's hw device context.
    cuda_ctx: Context,

    #[cfg(feature = "vulkan")]
    vulkan_ctx: Option<Box<VulkanContext>>,
    /// fd exported from the Vulkan persistent buffer, imported into CUDA on the
    /// processing thread.
    #[cfg(feature = "vulkan")]
    persistent_memory_fd: RawFd,
    #[cfg(feature = "vulkan")]
    persistent_buffer_size: u64,
    /// Keeps the CUDA external-memory mapping alive while `cuda_device_ptr` is used.
    #[cfg(feature = "vulkan")]
    cuda_ext_memory: Option<ExternalMemory>,
    /// Device pointer into the imported persistent buffer (copy source per frame).
    #[cfg(feature = "vulkan")]
    cuda_device_ptr: DevicePointer<u8>,

    /// CUDA handle for the registered GL texture; null until `init_gl` runs.
    #[cfg(feature = "egl")]
    graphics_resource: CUgraphicsResource,
    #[cfg(feature = "egl")]
    egl_context: Option<Box<EglContext>>,
    /// GL texture id the EGLImage of each frame is bound to.
    #[cfg(feature = "egl")]
    egl_texture: u32,
}
106
// SAFETY: NvencEncoder holds raw CUDA/GL/FFmpeg handles that are not Send/Sync
// by default. NOTE(review): this is sound only if all GPU work is confined to
// one thread at a time (the ProcessingThread drives it from a dedicated encode
// thread) — confirm there is no concurrent cross-thread access.
unsafe impl Send for NvencEncoder {}
unsafe impl Sync for NvencEncoder {}
109
110impl VideoEncoder for NvencEncoder {
111 type Output = EncodedVideoFrame;
112
113 fn reset(&mut self) -> Result<()> {
114 self.drop_processor();
115 let new_encoder = Self::create_encoder(
116 self.width,
117 self.height,
118 &self.encoder_name,
119 &self.quality,
120 &self.cuda_ctx,
121 )?;
122 self.encoder = Some(new_encoder);
123 Ok(())
124 }
125
126 fn drop_processor(&mut self) {
127 self.encoder.take();
128 }
129
130 fn output(&mut self) -> Option<Receiver<EncodedVideoFrame>> {
131 self.encoded_frame_recv.clone()
132 }
133
134 fn drain(&mut self) -> Result<()> {
135 if let Some(ref mut encoder) = self.encoder {
136 encoder.send_eof()?;
137 let mut packet = ffmpeg::codec::packet::Packet::empty();
138 while encoder.receive_packet(&mut packet).is_ok() {}
139 }
140 Ok(())
141 }
142
143 fn get_encoder(&self) -> &Option<ffmpeg::codec::encoder::Video> {
144 &self.encoder
145 }
146}
147
impl ProcessingThread for NvencEncoder {
    /// One-time setup executed on the dedicated encoding thread.
    ///
    /// Vulkan path: makes the CUDA context current, imports the Vulkan
    /// persistent-buffer fd as CUDA external memory, and keeps the mapped
    /// device pointer as the per-frame copy source.
    ///
    /// EGL path: creates a thread-local EGL context, makes it current, and
    /// registers the persistent GL texture with CUDA via `init_gl`.
    fn thread_setup(&mut self) -> Result<()> {
        #[cfg(feature = "vulkan")]
        {
            self.make_current()?;

            let ext_mem = unsafe {
                ExternalMemory::import(
                    self.persistent_memory_fd,
                    self.persistent_buffer_size as usize,
                )
            }
            .map_err(|e| WaycapError::Init(format!("CUDA ExternalMemory::import failed: {e:?}")))?;

            let device_ptr: DevicePointer<u8> = ext_mem
                .mapped_buffer(self.persistent_buffer_size as usize, 0)
                .map_err(|e| WaycapError::Init(format!("CUDA mapped_buffer failed: {e:?}")))?;

            // Keep the external memory alive for as long as the device pointer
            // is in use; both are released in Drop.
            self.cuda_ext_memory = Some(ext_mem);
            self.cuda_device_ptr = device_ptr;
        }
        #[cfg(feature = "egl")]
        {
            self.egl_context = Some(Box::new(EglContext::new(
                self.width as i32,
                self.height as i32,
            )?));
            self.make_current()?;
            // No explicit texture id: init_gl creates the persistent texture.
            self.init_gl(None)?;
        }
        Ok(())
    }

    /// Per-thread cleanup before the encode thread exits.
    ///
    /// NOTE(review): with `egl` enabled this unconditionally unwraps
    /// `egl_context` and panics if `thread_setup` never ran; with both features
    /// enabled the early `return` makes the vulkan arm unreachable. The two
    /// features appear intended to be mutually exclusive — confirm.
    fn thread_teardown(&mut self) -> Result<()> {
        #[cfg(feature = "egl")]
        return self.egl_context.as_ref().unwrap().release_current();
        #[cfg(feature = "vulkan")]
        Ok(())
    }

    /// Encode one captured dma-buf frame.
    ///
    /// Both paths stage the frame into CUDA memory, copy it into a frame
    /// allocated from the encoder's CUDA frame pool, submit it, and forward at
    /// most one ready packet over the bounded channel (logging and dropping it
    /// if the receiver is full or disconnected).
    fn process(&mut self, frame: RawVideoFrame) -> Result<()> {
        #[cfg(feature = "vulkan")]
        {
            let vulkan_ctx = self
                .vulkan_ctx
                .as_ref()
                .ok_or("Vulkan context not initialized")?;

            // Stage the dma-buf planes into the persistent device-visible buffer.
            let planes = extract_dmabuf_planes(&frame)?;
            vulkan_ctx.copy_dmabuf_to_persistent_buffer(
                &planes,
                frame.modifier,
                frame.dimensions.width,
                frame.dimensions.height,
            )?;

            if let Some(ref mut encoder) = self.encoder {
                // Pooled CUDA frame that will be handed to NVENC.
                let mut cuda_frame = ffmpeg::util::frame::Video::new(
                    ffmpeg_next::format::Pixel::CUDA,
                    encoder.width(),
                    encoder.height(),
                );

                unsafe {
                    let ret = av_hwframe_get_buffer(
                        (*encoder.as_ptr()).hw_frames_ctx,
                        cuda_frame.as_mut_ptr(),
                        0,
                    );
                    if ret < 0 {
                        return Err(WaycapError::Encoding(format!(
                            "Failed to allocate CUDA frame buffer: {ret}"
                        )));
                    }

                    // Device-to-device 2D copy from the staging buffer into the
                    // pooled frame. Pitch `width * 4` assumes a tightly packed
                    // 4-byte-per-pixel (BGRA) source — TODO confirm for widths
                    // with driver-imposed row alignment.
                    let copy_params = CUDA_MEMCPY2D_v2 {
                        srcMemoryType: CUmemorytype::CU_MEMORYTYPE_DEVICE,
                        srcDevice: self.cuda_device_ptr.as_raw(),
                        srcPitch: (encoder.width() * 4) as usize,
                        srcXInBytes: 0,
                        srcY: 0,
                        srcHost: std::ptr::null(),
                        srcArray: null_mut(),

                        dstMemoryType: CUmemorytype::CU_MEMORYTYPE_DEVICE,
                        dstDevice: (*cuda_frame.as_ptr()).data[0] as CUdeviceptr,
                        dstPitch: (*cuda_frame.as_ptr()).linesize[0] as usize,
                        dstXInBytes: 0,
                        dstY: 0,
                        dstHost: null_mut(),
                        dstArray: null_mut(),

                        WidthInBytes: (encoder.width() * 4) as usize,
                        Height: encoder.height() as usize,
                    };

                    let result = cuMemcpy2D_v2(&copy_params);
                    if result != CUresult::CUDA_SUCCESS {
                        return Err(WaycapError::Encoding(format!(
                            "cuMemcpy2D_v2 failed: {result:?}"
                        )));
                    }
                }

                // PTS passthrough from the capture clock.
                cuda_frame.set_pts(Some(frame.timestamp));
                encoder.send_frame(&cuda_frame)?;

                // NVENC may buffer frames before emitting output; forward at
                // most one ready packet per input frame.
                let mut packet = ffmpeg::codec::packet::Packet::empty();
                if encoder.receive_packet(&mut packet).is_ok() {
                    if let Some(data) = packet.data() {
                        match self.encoded_frame_sender.try_send(EncodedVideoFrame {
                            data: data.to_vec(),
                            is_keyframe: packet.is_key(),
                            pts: packet.pts().unwrap_or(0),
                            dts: packet.dts().unwrap_or(0),
                        }) {
                            Ok(_) => {}
                            Err(crossbeam::channel::TrySendError::Full(_)) => {
                                log::error!(
                                    "Could not send encoded video frame. Receiver is full"
                                );
                            }
                            Err(crossbeam::channel::TrySendError::Disconnected(_)) => {
                                log::error!(
                                    "Could not send encoded video frame. Receiver disconnected"
                                );
                            }
                        }
                    }
                }
            }
        }
        #[cfg(feature = "egl")]
        {
            // Import the dma-buf as an EGLImage bound to the persistent texture,
            // then read the texture through the registered CUDA graphics resource.
            match egl_img_from_dmabuf(self.egl_context.as_ref().unwrap(), &frame) {
                Ok(img) => {
                    if let Some(ref mut encoder) = self.encoder {
                        let mut cuda_frame = ffmpeg::util::frame::Video::new(
                            ffmpeg_next::format::Pixel::CUDA,
                            encoder.width(),
                            encoder.height(),
                        );

                        unsafe {
                            let ret = av_hwframe_get_buffer(
                                (*encoder.as_ptr()).hw_frames_ctx,
                                cuda_frame.as_mut_ptr(),
                                0,
                            );
                            if ret < 0 {
                                return Err(WaycapError::Encoding(format!(
                                    "Failed to allocate CUDA frame buffer: {ret}",
                                )));
                            }

                            // Map the registered GL texture for CUDA access.
                            let result =
                                cuGraphicsMapResources(1, &mut self.graphics_resource, null_mut());
                            if result != CUresult::CUDA_SUCCESS {
                                gl::BindTexture(gl::TEXTURE_2D, 0);
                                return Err(WaycapError::Encoding(format!(
                                    "Error mapping GL image to CUDA: {result:?}",
                                )));
                            }

                            let mut cuda_array: CUarray = null_mut();

                            // Fetch mip level 0 / array index 0 of the mapped texture.
                            let result = cuGraphicsSubResourceGetMappedArray(
                                &mut cuda_array,
                                self.graphics_resource,
                                0,
                                0,
                            );
                            if result != CUresult::CUDA_SUCCESS {
                                // Unmap before bailing so the resource stays reusable.
                                cuGraphicsUnmapResources(
                                    1,
                                    &mut self.graphics_resource,
                                    null_mut(),
                                );
                                gl::BindTexture(gl::TEXTURE_2D, 0);
                                return Err(WaycapError::Encoding(format!(
                                    "Error getting CUDA Array: {result:?}",
                                )));
                            }

                            // Array-to-device copy; 4 bytes/pixel (RGBA texture).
                            let copy_params = CUDA_MEMCPY2D_v2 {
                                srcMemoryType: CUmemorytype::CU_MEMORYTYPE_ARRAY,
                                srcArray: cuda_array,
                                srcXInBytes: 0,
                                srcY: 0,
                                srcHost: std::ptr::null(),
                                srcDevice: 0,
                                srcPitch: 0,

                                dstMemoryType: CUmemorytype::CU_MEMORYTYPE_DEVICE,
                                dstDevice: (*cuda_frame.as_ptr()).data[0] as CUdeviceptr,
                                dstPitch: (*cuda_frame.as_ptr()).linesize[0] as usize,
                                dstXInBytes: 0,
                                dstY: 0,
                                dstHost: std::ptr::null_mut(),
                                dstArray: std::ptr::null_mut(),

                                WidthInBytes: (encoder.width() * 4) as usize,
                                Height: encoder.height() as usize,
                            };

                            let result = cuMemcpy2D_v2(&copy_params);
                            if result != CUresult::CUDA_SUCCESS {
                                cuGraphicsUnmapResources(
                                    1,
                                    &mut self.graphics_resource,
                                    null_mut(),
                                );
                                gl::BindTexture(gl::TEXTURE_2D, 0);
                                return Err(WaycapError::Encoding(format!(
                                    "Error mapping cuda frame: {result:?}",
                                )));
                            }

                            let result = cuGraphicsUnmapResources(
                                1,
                                &mut self.graphics_resource,
                                null_mut(),
                            );
                            if result != CUresult::CUDA_SUCCESS {
                                return Err(WaycapError::Encoding(format!(
                                    "Could not unmap resource: {result:?}",
                                )));
                            }

                            gl::BindTexture(gl::TEXTURE_2D, 0);
                        }

                        // PTS passthrough from the capture clock.
                        cuda_frame.set_pts(Some(frame.timestamp));
                        encoder.send_frame(&cuda_frame)?;

                        // NOTE(review): this packet-forwarding block duplicates
                        // the vulkan path above — candidate for a shared helper.
                        let mut packet = ffmpeg::codec::packet::Packet::empty();
                        if encoder.receive_packet(&mut packet).is_ok() {
                            if let Some(data) = packet.data() {
                                match self.encoded_frame_sender.try_send(EncodedVideoFrame {
                                    data: data.to_vec(),
                                    is_keyframe: packet.is_key(),
                                    pts: packet.pts().unwrap_or(0),
                                    dts: packet.dts().unwrap_or(0),
                                }) {
                                    Ok(_) => {}
                                    Err(crossbeam::channel::TrySendError::Full(_)) => {
                                        log::error!(
                                            "Could not send encoded video frame. Receiver is full"
                                        );
                                    }
                                    Err(crossbeam::channel::TrySendError::Disconnected(_)) => {
                                        log::error!(
                                            "Could not send encoded video frame. Receiver disconnected"
                                        );
                                    }
                                }
                            };
                        }
                    }
                    // Release the per-frame EGLImage; the persistent texture remains.
                    self.egl_context.as_ref().unwrap().destroy_image(img)?;
                }
                Err(e) => log::error!("Could not process dma buf frame: {e:?}"),
            }
        }
        Ok(())
    }
}
416
impl PipewireSPA for NvencEncoder {
    /// Build the SPA pod advertised to PipeWire for stream format negotiation.
    ///
    /// Restricts negotiation to raw video dma-bufs carrying one of the NVIDIA
    /// modifiers, with NV12/I420/BGRA pixel formats, sizes from 1x1 up to
    /// 4096x4096 (default 2560x1440) and framerates from 0 up to 244 fps
    /// (default 240).
    fn get_spa_definition() -> Result<pw::spa::pod::Object> {
        // Modifier choice: default to the first NVIDIA modifier and accept any
        // of the alternatives (the list ends with DRM_FORMAT_MOD_INVALID as a
        // fallback for implicit-modifier producers).
        let nvidia_mod_property = pw::spa::pod::Property {
            key: pw::spa::param::format::FormatProperties::VideoModifier.as_raw(),
            flags: pw::spa::pod::PropertyFlags::empty(),
            value: pw::spa::pod::Value::Choice(pw::spa::pod::ChoiceValue::Long(
                pw::spa::utils::Choice::<i64>(
                    pw::spa::utils::ChoiceFlags::empty(),
                    pw::spa::utils::ChoiceEnum::<i64>::Enum {
                        default: NVIDIA_MODIFIERS[0],
                        alternatives: NVIDIA_MODIFIERS.to_vec(),
                    },
                ),
            )),
        };

        Ok(pw::spa::pod::object!(
            pw::spa::utils::SpaTypes::ObjectParamFormat,
            pw::spa::param::ParamType::EnumFormat,
            pw::spa::pod::property!(
                pw::spa::param::format::FormatProperties::MediaType,
                Id,
                pw::spa::param::format::MediaType::Video
            ),
            pw::spa::pod::property!(
                pw::spa::param::format::FormatProperties::MediaSubtype,
                Id,
                pw::spa::param::format::MediaSubtype::Raw
            ),
            nvidia_mod_property,
            pw::spa::pod::property!(
                pw::spa::param::format::FormatProperties::VideoFormat,
                Choice,
                Enum,
                Id,
                pw::spa::param::video::VideoFormat::NV12,
                pw::spa::param::video::VideoFormat::I420,
                pw::spa::param::video::VideoFormat::BGRA
            ),
            // Range choice: (default, min, max).
            pw::spa::pod::property!(
                pw::spa::param::format::FormatProperties::VideoSize,
                Choice,
                Range,
                Rectangle,
                pw::spa::utils::Rectangle {
                    width: 2560,
                    height: 1440
                },
                pw::spa::utils::Rectangle {
                    width: 1,
                    height: 1
                },
                pw::spa::utils::Rectangle {
                    width: 4096,
                    height: 4096
                }
            ),
            pw::spa::pod::property!(
                pw::spa::param::format::FormatProperties::VideoFramerate,
                Choice,
                Range,
                Fraction,
                pw::spa::utils::Fraction { num: 240, denom: 1 },
                pw::spa::utils::Fraction { num: 0, denom: 1 },
                pw::spa::utils::Fraction { num: 244, denom: 1 }
            ),
        ))
    }
}
486
487impl NvencEncoder {
488 pub(crate) fn new(width: u32, height: u32, quality: QualityPreset) -> Result<Self> {
489 let encoder_name = "h264_nvenc";
490 let (frame_tx, frame_rx) = bounded(10);
491 let cuda_ctx = cust::quick_init().unwrap();
492 let encoder = Self::create_encoder(width, height, encoder_name, &quality, &cuda_ctx)?;
493
494 #[cfg(feature = "vulkan")]
495 let vulkan_ctx = Box::new(VulkanContext::new(width, height)?);
496 #[cfg(feature = "vulkan")]
497 let persistent_memory_fd = vulkan_ctx.export_persistent_memory_fd()?;
498 #[cfg(feature = "vulkan")]
499 let persistent_buffer_size = vulkan_ctx.get_persistent_buffer_size();
500
501 Ok(Self {
502 encoder: Some(encoder),
503 width,
504 height,
505 encoder_name: encoder_name.to_string(),
506 quality,
507 encoded_frame_recv: Some(frame_rx),
508 encoded_frame_sender: frame_tx,
509 cuda_ctx,
510 #[cfg(feature = "vulkan")]
511 vulkan_ctx: Some(vulkan_ctx),
512 #[cfg(feature = "vulkan")]
513 persistent_memory_fd,
514 #[cfg(feature = "vulkan")]
515 persistent_buffer_size,
516 #[cfg(feature = "vulkan")]
517 cuda_ext_memory: None,
518 #[cfg(feature = "vulkan")]
519 cuda_device_ptr: DevicePointer::from_raw(0),
520 #[cfg(feature = "egl")]
521 graphics_resource: null_mut(),
522 #[cfg(feature = "egl")]
523 egl_context: None,
524 #[cfg(feature = "egl")]
525 egl_texture: 0,
526 })
527 }
528
529 fn create_encoder(
530 width: u32,
531 height: u32,
532 encoder: &str,
533 quality: &QualityPreset,
534 cuda_ctx: &Context,
535 ) -> Result<ffmpeg::codec::encoder::Video> {
536 let encoder_codec =
537 ffmpeg::codec::encoder::find_by_name(encoder).ok_or(ffmpeg::Error::EncoderNotFound)?;
538
539 let mut encoder_ctx = ffmpeg::codec::context::Context::new_with_codec(encoder_codec)
540 .encoder()
541 .video()?;
542
543 encoder_ctx.set_width(width);
544 encoder_ctx.set_height(height);
545 encoder_ctx.set_format(ffmpeg::format::Pixel::CUDA);
546 encoder_ctx.set_bit_rate(16_000_000);
547
548 unsafe {
549 let nvenc_device =
550 av_hwdevice_ctx_alloc(ffmpeg_next::ffi::AVHWDeviceType::AV_HWDEVICE_TYPE_CUDA);
551 if nvenc_device.is_null() {
552 return Err(WaycapError::Init(
553 "Could not initialize nvenc device".into(),
554 ));
555 }
556
557 let hw_device_ctx = (*nvenc_device).data as *mut AVHWDeviceContext;
558 let cuda_device_ctx = (*hw_device_ctx).hwctx as *mut AVCUDADeviceContext;
559 (*cuda_device_ctx).cuda_ctx = cuda_ctx.as_raw();
560
561 let err = av_hwdevice_ctx_init(nvenc_device);
562 if err < 0 {
563 return Err(WaycapError::Init(format!(
564 "Error trying to initialize hw device context: {err:?}"
565 )));
566 }
567
568 let hw_device_ctx = (*nvenc_device).data as *mut AVHWDeviceContext;
569 let cuda_device_ctx = (*hw_device_ctx).hwctx as *mut AVCUDADeviceContext;
570 (*cuda_device_ctx).cuda_ctx = cuda_ctx.as_raw();
571
572 let mut frame_ctx = create_hw_frame_ctx(nvenc_device)?;
573 if frame_ctx.is_null() {
574 return Err(WaycapError::Init(
575 "Could not initialize hw frame context".into(),
576 ));
577 }
578
579 let hw_frame_context = &mut *((*frame_ctx).data as *mut AVHWFramesContext);
580 hw_frame_context.width = width as i32;
581 hw_frame_context.height = height as i32;
582 #[cfg(feature = "vulkan")]
583 {
584 hw_frame_context.sw_format = AVPixelFormat::AV_PIX_FMT_BGRA;
585 }
586 #[cfg(feature = "egl")]
587 {
588 hw_frame_context.sw_format = AVPixelFormat::AV_PIX_FMT_RGBA;
589 }
590 hw_frame_context.format = encoder_ctx.format().into();
591 hw_frame_context.device_ctx = hw_device_ctx;
592 hw_frame_context.initial_pool_size = 2;
593
594 let err = av_hwframe_ctx_init(frame_ctx);
595 if err < 0 {
596 return Err(WaycapError::Init(format!(
597 "Error trying to initialize hw frame context: {err:?}"
598 )));
599 }
600
601 (*encoder_ctx.as_mut_ptr()).hw_device_ctx = av_buffer_ref(nvenc_device);
602 (*encoder_ctx.as_mut_ptr()).hw_frames_ctx = av_buffer_ref(frame_ctx);
603 av_buffer_unref(&mut frame_ctx);
604 }
605
606 encoder_ctx.set_time_base(Rational::new(1, TIME_UNIT_NS as i32));
607 encoder_ctx.set_gop(GOP_SIZE);
608
609 let encoder_params = ffmpeg::codec::Parameters::new();
610 let opts = Self::get_encoder_params(quality);
611 encoder_ctx.set_parameters(encoder_params)?;
612 let encoder = encoder_ctx.open_with(opts)?;
613 Ok(encoder)
614 }
615
616 fn get_encoder_params(quality: &QualityPreset) -> ffmpeg::Dictionary<'_> {
617 let mut opts = ffmpeg::Dictionary::new();
618 opts.set("vsync", "vfr");
619 opts.set("rc", "vbr");
620 opts.set("tune", "hq");
621 match quality {
622 QualityPreset::Low => {
623 opts.set("preset", "p2");
624 opts.set("cq", "30");
625 opts.set("b:v", "20M");
626 }
627 QualityPreset::Medium => {
628 opts.set("preset", "p4");
629 opts.set("cq", "25");
630 opts.set("b:v", "40M");
631 }
632 QualityPreset::High => {
633 opts.set("preset", "p7");
634 opts.set("cq", "20");
635 opts.set("b:v", "80M");
636 }
637 QualityPreset::Ultra => {
638 opts.set("preset", "p7");
639 opts.set("cq", "15");
640 opts.set("b:v", "120M");
641 }
642 }
643 opts
644 }
645
646 #[cfg(feature = "egl")]
647 fn init_gl(&mut self, texture_id: Option<u32>) -> Result<()> {
648 self.egl_texture = match texture_id {
649 Some(texture_id) => texture_id,
650 None => {
651 self.egl_context
652 .as_ref()
653 .unwrap()
654 .create_persistent_texture()?;
655 self.egl_context.as_ref().unwrap().get_texture_id().unwrap()
656 }
657 };
658
659 unsafe {
660 let result = cuGraphicsGLRegisterImage(
661 &mut self.graphics_resource,
662 self.egl_texture,
663 gl::TEXTURE_2D,
664 0x00, );
666
667 if result != CUresult::CUDA_SUCCESS {
668 return Err(WaycapError::Init(format!(
669 "Error registering GL texture to CUDA: {result:?}",
670 )));
671 }
672
673 let result = cuGraphicsResourceSetMapFlags_v2(self.graphics_resource, 0);
674
675 if result != CUresult::CUDA_SUCCESS {
676 cuGraphicsUnregisterResource(self.graphics_resource);
677 gl::BindTexture(gl::TEXTURE_2D, 0);
678 return Err(WaycapError::Init(format!(
679 "Failed to set graphics resource map flags: {result:?}",
680 )));
681 }
682 }
683
684 Ok(())
685 }
686
687 fn make_current(&self) -> Result<()> {
688 unsafe { cuCtxSetCurrent(self.cuda_ctx.as_raw()) };
689 Ok(())
690 }
691}
692
#[cfg(feature = "egl")]
/// Import a captured dma-buf frame as an EGLImage and bind it to the EGL
/// context's persistent texture, returning the image for later destruction.
fn egl_img_from_dmabuf(egl_ctx: &EglContext, raw_frame: &RawVideoFrame) -> Result<Image> {
    // Split the incoming dma-buf into its per-plane fd/offset/stride descriptors.
    let planes = extract_dmabuf_planes(raw_frame)?;

    // Wrap the planes in an EGLImage (ARGB8888, same modifier as the capture).
    let image = egl_ctx.create_image_from_dmabuf(
        &planes,
        drm_fourcc::DrmFourcc::Argb8888 as u32,
        raw_frame.dimensions.width,
        raw_frame.dimensions.height,
        raw_frame.modifier,
    )?;

    // Attach the image to the persistent GL texture so CUDA can read it.
    egl_ctx.update_texture_from_image(image)?;
    Ok(image)
}
708
709impl Drop for NvencEncoder {
710 fn drop(&mut self) {
711 if let Err(e) = self.drain() {
712 log::debug!("Encoder drain on drop: {e:?}");
713 }
714 self.drop_processor();
715
716 #[cfg(feature = "egl")]
717 {
718 if let Some(egl_ctx) = self.egl_context.as_ref() {
719 let _ = egl_ctx.make_current();
720 }
721 }
722
723 if let Err(e) = self.make_current() {
724 log::error!("Could not set CUDA context current during drop: {e:?}");
725 }
726
727 #[cfg(feature = "egl")]
728 unsafe {
729 let result = cuGraphicsUnregisterResource(self.graphics_resource);
730 if result != CUresult::CUDA_SUCCESS {
731 log::error!("Error cleaning up graphics resource: {result:?}");
732 }
733 }
734
735 #[cfg(feature = "vulkan")]
736 {
737 drop(self.cuda_ext_memory.take());
742 drop(self.vulkan_ctx.take());
743 }
744 }
745}