kernel/dma.rs

// SPDX-License-Identifier: GPL-2.0

//! Direct memory access (DMA).
//!
//! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)

use crate::{
    bindings, build_assert, device,
    device::{Bound, Core},
    error::{to_result, Result},
    prelude::*,
    sync::aref::ARef,
    transmute::{AsBytes, FromBytes},
};
use core::ptr::NonNull;

/// DMA address type.
///
/// Represents a bus address used for Direct Memory Access (DMA) operations.
///
/// This is an alias of the kernel's `dma_addr_t`, which may be `u32` or `u64` depending on
/// `CONFIG_ARCH_DMA_ADDR_T_64BIT`.
///
/// Note that this may be `u64` even on 32-bit architectures.
pub type DmaAddress = bindings::dma_addr_t;

/// Trait to be implemented by DMA capable bus devices.
///
/// The [`dma::Device`](Device) trait should be implemented by bus specific device representations,
/// where the underlying bus is DMA capable, such as:
#[cfg_attr(CONFIG_PCI, doc = "* [`pci::Device`](kernel::pci::Device)")]
/// * [`platform::Device`](::kernel::platform::Device)
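///
/// # Examples
///
/// A minimal sketch of a driver's `probe()` configuring DMA addressing; the generic device
/// parameter stands in for a concrete bus device:
///
/// ```
/// use kernel::dma::{Device, DmaMask};
///
/// # fn probe<D: Device>(dev: &D) -> Result {
/// // SAFETY: `probe()` runs before any DMA allocation or mapping primitives can be called for
/// // this device, so the mask cannot be changed concurrently with their use.
/// unsafe { dev.dma_set_mask_and_coherent(DmaMask::new::<64>())? };
/// # Ok::<(), Error>(()) }
/// ```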
pub trait Device: AsRef<device::Device<Core>> {
    /// Set up the device's DMA streaming addressing capabilities.
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
    unsafe fn dma_set_mask(&self, mask: DmaMask) -> Result {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this mask.
        to_result(unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask.value()) })
    }

    /// Set up the device's DMA coherent addressing capabilities.
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
    unsafe fn dma_set_coherent_mask(&self, mask: DmaMask) -> Result {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this mask.
        to_result(unsafe { bindings::dma_set_coherent_mask(self.as_ref().as_raw(), mask.value()) })
    }

    /// Set up the device's DMA addressing capabilities.
    ///
    /// This is a combination of [`Device::dma_set_mask`] and [`Device::dma_set_coherent_mask`].
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
    unsafe fn dma_set_mask_and_coherent(&self, mask: DmaMask) -> Result {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this mask.
        to_result(unsafe {
            bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask.value())
        })
    }

    /// Set the maximum size of a single DMA segment the device may request.
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
    unsafe fn dma_set_max_seg_size(&self, size: u32) {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this parameter.
        unsafe { bindings::dma_set_max_seg_size(self.as_ref().as_raw(), size) }
    }
}

/// A DMA mask that holds a bitmask with the lowest `n` bits set.
///
/// Use [`DmaMask::new`] or [`DmaMask::try_new`] to construct a value. Values
/// are guaranteed to never exceed the bit width of `u64`.
///
/// This is the Rust equivalent of the C macro `DMA_BIT_MASK()`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DmaMask(u64);

impl DmaMask {
    /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
    ///
    /// For `n <= 64`, sets exactly the lowest `n` bits.
    /// For `n > 64`, results in a build error.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::dma::DmaMask;
    ///
    /// let mask0 = DmaMask::new::<0>();
    /// assert_eq!(mask0.value(), 0);
    ///
    /// let mask1 = DmaMask::new::<1>();
    /// assert_eq!(mask1.value(), 0b1);
    ///
    /// let mask64 = DmaMask::new::<64>();
    /// assert_eq!(mask64.value(), u64::MAX);
    ///
    /// // Build failure.
    /// // let mask_overflow = DmaMask::new::<100>();
    /// ```
    #[inline]
    pub const fn new<const N: u32>() -> Self {
        let Ok(mask) = Self::try_new(N) else {
            build_error!("Invalid DMA Mask.");
        };

        mask
    }

    /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
    ///
    /// For `n <= 64`, sets exactly the lowest `n` bits.
    /// For `n > 64`, returns [`EINVAL`].
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::dma::DmaMask;
    ///
    /// let mask0 = DmaMask::try_new(0)?;
    /// assert_eq!(mask0.value(), 0);
    ///
    /// let mask1 = DmaMask::try_new(1)?;
    /// assert_eq!(mask1.value(), 0b1);
    ///
    /// let mask64 = DmaMask::try_new(64)?;
    /// assert_eq!(mask64.value(), u64::MAX);
    ///
    /// let mask_overflow = DmaMask::try_new(100);
    /// assert!(mask_overflow.is_err());
    /// # Ok::<(), Error>(())
    /// ```
    #[inline]
    pub const fn try_new(n: u32) -> Result<Self> {
        Ok(Self(match n {
            0 => 0,
            1..=64 => u64::MAX >> (64 - n),
            _ => return Err(EINVAL),
        }))
    }

    /// Returns the underlying `u64` bitmask value.
    #[inline]
    pub const fn value(&self) -> u64 {
        self.0
    }
}

/// Possible attributes associated with a DMA mapping.
///
/// They can be combined with the operators `|`, `&`, and `!`.
///
/// Values can be used from the [`attrs`] module.
///
/// # Examples
///
/// ```
/// # use kernel::device::{Bound, Device};
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// # fn test(dev: &Device<Bound>) -> Result {
/// let attribs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
/// let c: CoherentAllocation<u64> =
///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, attribs)?;
/// # Ok::<(), Error>(()) }
/// ```
#[derive(Clone, Copy, PartialEq)]
#[repr(transparent)]
pub struct Attrs(u32);

impl Attrs {
    /// Get the raw representation of this attribute.
    pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
        self.0 as crate::ffi::c_ulong
    }

    /// Check whether `flags` is contained in `self`.
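    ///
    /// # Examples
    ///
    /// Checking for individual attributes in a combined set:
    ///
    /// ```
    /// use kernel::dma::attrs::*;
    ///
    /// let attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
    /// assert!(attrs.contains(DMA_ATTR_NO_WARN));
    /// assert!(!attrs.contains(DMA_ATTR_WEAK_ORDERING));
    /// ```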
    pub fn contains(self, flags: Attrs) -> bool {
        (self & flags) == flags
    }
}

impl core::ops::BitOr for Attrs {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitAnd for Attrs {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl core::ops::Not for Attrs {
    type Output = Self;
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

/// DMA mapping attributes.
pub mod attrs {
    use super::Attrs;

    /// Specifies that reads and writes to the mapping may be weakly ordered, that is, reads
    /// and writes may pass each other.
    pub const DMA_ATTR_WEAK_ORDERING: Attrs = Attrs(bindings::DMA_ATTR_WEAK_ORDERING);

    /// Specifies that writes to the mapping may be buffered to improve performance.
    pub const DMA_ATTR_WRITE_COMBINE: Attrs = Attrs(bindings::DMA_ATTR_WRITE_COMBINE);

    /// Allows platform code to skip synchronization of the CPU cache for the given buffer,
    /// assuming that it has already been transferred to the 'device' domain.
    pub const DMA_ATTR_SKIP_CPU_SYNC: Attrs = Attrs(bindings::DMA_ATTR_SKIP_CPU_SYNC);

    /// Forces contiguous allocation of the buffer in physical memory.
    pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);

    /// Hints to the DMA-mapping subsystem that it's probably not worth the time to try to
    /// allocate the memory in a way that gives better TLB efficiency.
    pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);

    /// Tells the DMA-mapping subsystem to suppress allocation failure reports (similarly to
    /// `__GFP_NOWARN`).
    pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);

    /// Indicates that the buffer is fully accessible at an elevated privilege level (and
    /// ideally inaccessible or at least read-only at lesser-privileged levels).
    pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);

    /// Indicates that the buffer is MMIO memory.
    pub const DMA_ATTR_MMIO: Attrs = Attrs(bindings::DMA_ATTR_MMIO);
}

/// DMA data direction.
///
/// Corresponds to the C [`enum dma_data_direction`].
///
/// [`enum dma_data_direction`]: srctree/include/linux/dma-direction.h
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum DataDirection {
    /// The DMA mapping is for bidirectional data transfer.
    ///
    /// This is used when the buffer can be both read from and written to by the device.
    /// The cache for the corresponding memory region is both flushed and invalidated.
    Bidirectional = Self::const_cast(bindings::dma_data_direction_DMA_BIDIRECTIONAL),

    /// The DMA mapping is for data transfer from memory to the device (write).
    ///
    /// The CPU has prepared data in the buffer, and the device will read it.
    /// The cache for the corresponding memory region is flushed before device access.
    ToDevice = Self::const_cast(bindings::dma_data_direction_DMA_TO_DEVICE),

    /// The DMA mapping is for data transfer from the device to memory (read).
    ///
    /// The device will write data into the buffer for the CPU to read.
    /// The cache for the corresponding memory region is invalidated before CPU access.
    FromDevice = Self::const_cast(bindings::dma_data_direction_DMA_FROM_DEVICE),

    /// The DMA mapping is not for data transfer.
    ///
    /// This is primarily for debugging purposes. With this direction, the DMA mapping API
    /// will not perform any cache coherency operations.
    None = Self::const_cast(bindings::dma_data_direction_DMA_NONE),
}

impl DataDirection {
    /// Casts the bindgen-generated enum type to a `u32` at compile time.
    ///
    /// This function will cause a compile-time error if the underlying value of the
    /// C enum is out of bounds for `u32`.
    const fn const_cast(val: bindings::dma_data_direction) -> u32 {
        // CAST: The C standard allows compilers to choose different integer types for enums.
        // To safely check the value, we cast it to a wide signed integer type (`i128`),
        // which can hold any standard C integer enum type without truncation.
        let wide_val = val as i128;

        // Check if the value is outside the valid range for the target type `u32`.
        // CAST: `u32::MAX` is cast to `i128` to match the type of `wide_val` for the comparison.
        if wide_val < 0 || wide_val > u32::MAX as i128 {
            // Trigger a compile-time error in a const context.
            build_error!("C enum value is out of bounds for the target type `u32`.");
        }

        // CAST: This cast is valid because the check above guarantees that `wide_val`
        // is within the representable range of `u32`.
        wide_val as u32
    }
}

impl From<DataDirection> for bindings::dma_data_direction {
    /// Returns the raw representation of [`enum dma_data_direction`].
    fn from(direction: DataDirection) -> Self {
        // CAST: `direction as u32` gets the underlying representation of our `#[repr(u32)]` enum.
        // The subsequent cast to `Self` (the bindgen type) assumes the C enum is compatible
        // with the enum variants of `DataDirection`, which is a valid assumption given our
        // compile-time checks.
        direction as u32 as Self
    }
}

/// An abstraction of the `dma_alloc_coherent` API.
///
/// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
/// large coherent DMA regions.
///
/// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
/// processor's virtual address space) and the device address which can be given to the device
/// as the DMA address base of the region. The region is released once [`CoherentAllocation`]
/// is dropped.
///
/// # Invariants
///
/// - For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
///   to an allocated region of coherent memory and `dma_handle` is the DMA address base of the
///   region.
/// - The size in bytes of the allocation is equal to `size_of::<T> * count`.
/// - `size_of::<T> * count` fits into a `usize`.
// TODO
//
// DMA allocations potentially carry device resources (e.g. IOMMU mappings), hence for soundness
// reasons DMA allocations would need to be embedded in a `Devres` container, in order to ensure
// that device resources can never survive device unbind.
//
// However, it is neither desirable nor necessary to protect the allocated memory of the DMA
// allocation from surviving device unbind; it would require RCU read side critical sections to
// access the memory, which may require subsequent unnecessary copies.
//
// Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
// entire `CoherentAllocation` including the allocated memory itself.
pub struct CoherentAllocation<T: AsBytes + FromBytes> {
    dev: ARef<device::Device>,
    dma_handle: DmaAddress,
    count: usize,
    cpu_addr: NonNull<T>,
    dma_attrs: Attrs,
}

impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
    /// Allocates a region of `size_of::<T> * count` of coherent memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use kernel::device::{Bound, Device};
    /// use kernel::dma::{attrs::*, CoherentAllocation};
    ///
    /// # fn test(dev: &Device<Bound>) -> Result {
    /// let c: CoherentAllocation<u64> =
    ///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, DMA_ATTR_NO_WARN)?;
    /// # Ok::<(), Error>(()) }
    /// ```
    pub fn alloc_attrs(
        dev: &device::Device<Bound>,
        count: usize,
        gfp_flags: kernel::alloc::Flags,
        dma_attrs: Attrs,
    ) -> Result<CoherentAllocation<T>> {
        build_assert!(
            core::mem::size_of::<T>() > 0,
            "It doesn't make sense for the allocated type to be a ZST"
        );

        let size = count
            .checked_mul(core::mem::size_of::<T>())
            .ok_or(EOVERFLOW)?;
        let mut dma_handle = 0;
        // SAFETY: The device pointer is guaranteed to be valid by the type invariant of
        // `device::Device`.
        let addr = unsafe {
            bindings::dma_alloc_attrs(
                dev.as_raw(),
                size,
                &mut dma_handle,
                gfp_flags.as_raw(),
                dma_attrs.as_raw(),
            )
        };
        let addr = NonNull::new(addr).ok_or(ENOMEM)?;
        // INVARIANT:
        // - We just successfully allocated a coherent region which is accessible for
        //   `count` elements, hence the cpu address is valid. We also hold a refcounted reference
        //   to the device.
        // - The allocated `size` is equal to `size_of::<T> * count`.
        // - The allocated `size` fits into a `usize`.
        Ok(Self {
            dev: dev.into(),
            dma_handle,
            count,
            cpu_addr: addr.cast(),
            dma_attrs,
        })
    }

    /// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except that
    /// `dma_attrs` defaults to 0.
    pub fn alloc_coherent(
        dev: &device::Device<Bound>,
        count: usize,
        gfp_flags: kernel::alloc::Flags,
    ) -> Result<CoherentAllocation<T>> {
        CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
    }

    /// Returns the number of elements `T` in this allocation.
    ///
    /// Note that this is not the size of the allocation in bytes, which is provided by
    /// [`Self::size`].
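    ///
    /// # Examples
    ///
    /// The element count and the byte size are related by `size_of::<T>()`:
    ///
    /// ```
    /// # use kernel::device::{Bound, Device};
    /// # use kernel::dma::CoherentAllocation;
    /// # fn test(dev: &Device<Bound>) -> Result {
    /// let c: CoherentAllocation<u64> = CoherentAllocation::alloc_coherent(dev, 4, GFP_KERNEL)?;
    ///
    /// assert_eq!(c.count(), 4);
    /// assert_eq!(c.size(), 4 * core::mem::size_of::<u64>());
    /// # Ok::<(), Error>(()) }
    /// ```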
    pub fn count(&self) -> usize {
        self.count
    }

    /// Returns the size in bytes of this allocation.
    pub fn size(&self) -> usize {
        // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits into
        // a `usize`.
        self.count * core::mem::size_of::<T>()
    }

    /// Returns the raw pointer to the allocated region in the CPU's virtual address space.
    #[inline]
    pub fn as_ptr(&self) -> *const [T] {
        core::ptr::slice_from_raw_parts(self.cpu_addr.as_ptr(), self.count)
    }

    /// Returns the raw pointer to the allocated region in the CPU's virtual address space as
    /// a mutable pointer.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut [T] {
        core::ptr::slice_from_raw_parts_mut(self.cpu_addr.as_ptr(), self.count)
    }

    /// Returns the base address of the allocated region in the CPU's virtual address space.
    pub fn start_ptr(&self) -> *const T {
        self.cpu_addr.as_ptr()
    }

    /// Returns the base address of the allocated region in the CPU's virtual address space as
    /// a mutable pointer.
    pub fn start_ptr_mut(&mut self) -> *mut T {
        self.cpu_addr.as_ptr()
    }

    /// Returns a DMA handle which may be given to the device as the DMA address base of
    /// the region.
    pub fn dma_handle(&self) -> DmaAddress {
        self.dma_handle
    }

    /// Returns a DMA handle starting at `offset` (in units of `T`) which may be given to the
    /// device as the DMA address base of the region.
    ///
    /// Returns `EINVAL` if `offset` is not within the bounds of the allocation.
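    ///
    /// # Examples
    ///
    /// The handle of element `offset` is `offset * size_of::<T>()` bytes past the base handle:
    ///
    /// ```
    /// # use kernel::device::{Bound, Device};
    /// # use kernel::dma::CoherentAllocation;
    /// # fn test(dev: &Device<Bound>) -> Result {
    /// let c: CoherentAllocation<u64> = CoherentAllocation::alloc_coherent(dev, 4, GFP_KERNEL)?;
    ///
    /// assert_eq!(c.dma_handle_with_offset(2)?, c.dma_handle() + 2 * 8);
    ///
    /// // Offsets at or beyond `count` are rejected.
    /// assert!(c.dma_handle_with_offset(4).is_err());
    /// # Ok::<(), Error>(()) }
    /// ```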
    pub fn dma_handle_with_offset(&self, offset: usize) -> Result<DmaAddress> {
        if offset >= self.count {
            Err(EINVAL)
        } else {
            // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits
            // into a `usize`, and `offset` is less than `count`.
            Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as DmaAddress)
        }
    }

    /// Common helper to validate a range within the allocated region in the CPU's virtual
    /// address space.
    fn validate_range(&self, offset: usize, count: usize) -> Result {
        if offset.checked_add(count).ok_or(EOVERFLOW)? > self.count {
            return Err(EINVAL);
        }
        Ok(())
    }

    /// Returns the data from the region starting at `offset` as a slice.
    /// `offset` and `count` are in units of `T`, not the number of bytes.
    ///
    /// For ring-buffer style read/write access, or for use cases where a pointer to the live data
    /// is needed, [`CoherentAllocation::start_ptr`] or [`CoherentAllocation::start_ptr_mut`] can
    /// be used instead.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that the device does not read/write to/from memory while the returned
    ///   slice is live.
    /// * Callers must ensure that this call does not race with a write to the same region while
    ///   the returned slice is live.
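    ///
    /// # Examples
    ///
    /// Borrowing the first four elements of an allocation:
    ///
    /// ```
    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u8>) -> Result {
    /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to
    /// // the region while the slice is live.
    /// let data = unsafe { alloc.as_slice(0, 4) }?;
    /// assert_eq!(data.len(), 4);
    /// # Ok::<(), Error>(()) }
    /// ```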
    pub unsafe fn as_slice(&self, offset: usize, count: usize) -> Result<&[T]> {
        self.validate_range(offset, count)?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`, and we've
        //   just checked that the range is within bounds. The immutability of the data is
        //   guaranteed by the safety requirements of the function.
        // - `offset + count` can't overflow since it doesn't exceed `self.count`, and the
        //   constructor checked that `size_of::<T> * count` fits into a `usize`.
        Ok(unsafe { core::slice::from_raw_parts(self.start_ptr().add(offset), count) })
    }

    /// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
    /// slice is returned.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that the device does not read/write to/from memory while the returned
    ///   slice is live.
    /// * Callers must ensure that this call does not race with a read or write to the same region
    ///   while the returned slice is live.
    pub unsafe fn as_slice_mut(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
        self.validate_range(offset, count)?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`, and we've
        //   just checked that the range is within bounds. Exclusive access to the data is
        //   guaranteed by the safety requirements of the function.
        // - `offset + count` can't overflow since it doesn't exceed `self.count`, and the
        //   constructor checked that `size_of::<T> * count` fits into a `usize`.
        Ok(unsafe { core::slice::from_raw_parts_mut(self.start_ptr_mut().add(offset), count) })
    }

    /// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
    /// number of bytes.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that this call does not race with a read or write to the same region
    ///   that overlaps with this write.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
    /// let somedata: [u8; 4] = [0xf; 4];
    /// let buf: &[u8] = &somedata;
    /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to the
    /// // region.
    /// unsafe { alloc.write(buf, 0)?; }
    /// # Ok::<(), Error>(()) }
    /// ```
    pub unsafe fn write(&mut self, src: &[T], offset: usize) -> Result {
        self.validate_range(offset, src.len())?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`,
        //   and we've just checked that the range is within bounds.
        // - `offset + src.len()` can't overflow since it doesn't exceed `self.count`, and the
        //   constructor checked that `size_of::<T> * count` fits into a `usize`.
        unsafe {
            core::ptr::copy_nonoverlapping(
                src.as_ptr(),
                self.start_ptr_mut().add(offset),
                src.len(),
            )
        };
        Ok(())
    }

    /// Reads the value of `field` and ensures that its type is [`FromBytes`].
    ///
    /// # Safety
    ///
    /// This must be called from the [`dma_read`] macro which ensures that the `field` pointer is
    /// validated beforehand.
    ///
    /// Public but hidden since it should only be used from the [`dma_read`] macro.
    #[doc(hidden)]
    pub unsafe fn field_read<F: FromBytes>(&self, field: *const F) -> F {
        // SAFETY:
        // - By the safety requirements, `field` is valid.
        // - Using `read_volatile()` here is not sound as per the usual rules; the usage here is
        //   a special exception with the following notes in place. When dealing with a potential
        //   race from hardware or code outside the kernel (e.g. a user-space program), we need
        //   reads of valid memory to not be UB. Currently `read_volatile()` is used for this, the
        //   rationale being that it should generate the same code as `READ_ONCE()`, which the
        //   kernel already relies on to avoid UB on data races. Note that the usage of
        //   `read_volatile()` is limited to this particular case; it cannot be used to prevent
        //   the UB caused by racing between two kernel functions, nor does it provide atomicity.
        unsafe { field.read_volatile() }
    }

    /// Writes a value to `field` and ensures that its type is [`AsBytes`].
    ///
    /// # Safety
    ///
    /// This must be called from the [`dma_write`] macro which ensures that the `field` pointer is
    /// validated beforehand.
    ///
    /// Public but hidden since it should only be used from the [`dma_write`] macro.
    #[doc(hidden)]
    pub unsafe fn field_write<F: AsBytes>(&self, field: *mut F, val: F) {
        // SAFETY:
        // - By the safety requirements, `field` is valid.
        // - Using `write_volatile()` here is not sound as per the usual rules; the usage here is
        //   a special exception with the following notes in place. When dealing with a potential
        //   race from hardware or code outside the kernel (e.g. a user-space program), we need
        //   writes to valid memory to not be UB. Currently `write_volatile()` is used for this,
        //   the rationale being that it should generate the same code as `WRITE_ONCE()`, which the
        //   kernel already relies on to avoid UB on data races. Note that the usage of
        //   `write_volatile()` is limited to this particular case; it cannot be used to prevent
        //   the UB caused by racing between two kernel functions, nor does it provide atomicity.
        unsafe { field.write_volatile(val) }
    }
}

/// Note that any device configured to do DMA to this region must be halted before this object
/// is dropped.
impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
    fn drop(&mut self) {
        let size = self.count * core::mem::size_of::<T>();
        // SAFETY: The device pointer is guaranteed to be valid by the type invariant of
        // `device::Device`. The CPU address and the DMA handle are valid due to the type
        // invariants on `CoherentAllocation`.
        unsafe {
            bindings::dma_free_attrs(
                self.dev.as_raw(),
                size,
                self.start_ptr_mut().cast(),
                self.dma_handle,
                self.dma_attrs.as_raw(),
            )
        }
    }
}

// SAFETY: It is safe to send a `CoherentAllocation` to another thread if `T`
// can be sent to another thread.
unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}

/// Reads a field of an item from an allocated region of structs.
///
/// The syntax is of the form `kernel::dma_read!(dma, proj)` where `dma` is an expression evaluating
/// to a [`CoherentAllocation`] and `proj` is a [projection specification](kernel::ptr::project!).
///
/// # Examples
///
/// ```
/// use kernel::device::Device;
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// struct MyStruct { field: u32, }
///
/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
/// unsafe impl kernel::transmute::FromBytes for MyStruct {}
/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
/// unsafe impl kernel::transmute::AsBytes for MyStruct {}
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
/// let whole = kernel::dma_read!(alloc, [2]?);
/// let field = kernel::dma_read!(alloc, [1]?.field);
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_read {
    ($dma:expr, $($proj:tt)*) => {{
        let dma = &$dma;
        let ptr = $crate::ptr::project!(
            $crate::dma::CoherentAllocation::as_ptr(dma), $($proj)*
        );
        // SAFETY: The pointer created by the projection is within the DMA region.
        unsafe { $crate::dma::CoherentAllocation::field_read(dma, ptr) }
    }};
}

/// Writes to a field of an item from an allocated region of structs.
///
/// The syntax is of the form `kernel::dma_write!(dma, proj, val)` where `dma` is an expression
/// evaluating to a [`CoherentAllocation`], `proj` is a
/// [projection specification](kernel::ptr::project!), and `val` is the value to be written to the
/// projected location.
///
/// # Examples
///
/// ```
/// use kernel::device::Device;
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// struct MyStruct { member: u32, }
///
/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
/// unsafe impl kernel::transmute::FromBytes for MyStruct {}
/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
/// unsafe impl kernel::transmute::AsBytes for MyStruct {}
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
/// kernel::dma_write!(alloc, [2]?.member, 0xf);
/// kernel::dma_write!(alloc, [1]?, MyStruct { member: 0xf });
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_write {
    (@parse [$dma:expr] [$($proj:tt)*] [, $val:expr]) => {{
        let dma = &$dma;
        let ptr = $crate::ptr::project!(
            mut $crate::dma::CoherentAllocation::as_mut_ptr(dma), $($proj)*
        );
        let val = $val;
        // SAFETY: The pointer created by the projection is within the DMA region.
        unsafe { $crate::dma::CoherentAllocation::field_write(dma, ptr, val) }
    }};
    (@parse [$dma:expr] [$($proj:tt)*] [.$field:tt $($rest:tt)*]) => {
        $crate::dma_write!(@parse [$dma] [$($proj)* .$field] [$($rest)*])
    };
    (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr]? $($rest:tt)*]) => {
        $crate::dma_write!(@parse [$dma] [$($proj)* [$index]?] [$($rest)*])
    };
    (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr] $($rest:tt)*]) => {
        $crate::dma_write!(@parse [$dma] [$($proj)* [$index]] [$($rest)*])
    };
    ($dma:expr, $($rest:tt)*) => {
        $crate::dma_write!(@parse [$dma] [] [$($rest)*])
    };
}