iPXE
dma.h
Go to the documentation of this file.
1 #ifndef _IPXE_DMA_H
2 #define _IPXE_DMA_H
3 
4 /** @file
5  *
6  * DMA mappings
7  *
8  */
9 
10 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
11 FILE_SECBOOT ( PERMITTED );
12 
13 #include <stdint.h>
14 #include <ipxe/api.h>
15 #include <ipxe/io.h>
16 #include <ipxe/malloc.h>
17 #include <ipxe/umalloc.h>
18 #include <config/ioapi.h>
19 
/* API implementation selection: the implementation chosen in
 * config/ioapi.h gets an empty prefix (presumably making it provide
 * the canonical dma_xxx symbols via SINGLE_API_INLINE); all others
 * keep a distinguishing __op_/__flat_ prefix. */
#ifdef DMAAPI_OP
#define DMAAPI_PREFIX_op
#else
#define DMAAPI_PREFIX_op __op_
#endif

#ifdef DMAAPI_FLAT
#define DMAAPI_PREFIX_flat
#else
#define DMAAPI_PREFIX_flat __flat_
#endif
31 
32 /** A DMA mapping */
33 struct dma_mapping {
34  /** Address offset
35  *
36  * This is the value that must be added to a physical address
37  * within the mapping in order to produce the corresponding
38  * device-side DMA address.
39  */
41  /** DMA device (if unmapping is required) */
42  struct dma_device *dma;
43  /** Platform mapping token */
44  void *token;
45 };
46 
47 /** A DMA-capable device */
48 struct dma_device {
49  /** DMA operations */
50  struct dma_operations *op;
51  /** Addressable space mask */
53  /** Total number of mappings (for debugging) */
54  unsigned int mapped;
55  /** Total number of allocations (for debugging) */
56  unsigned int allocated;
57 };
58 
59 /** DMA operations */
61  /**
62  * Map buffer for DMA
63  *
64  * @v dma DMA device
65  * @v map DMA mapping to fill in
66  * @v addr Buffer address
67  * @v len Length of buffer
68  * @v flags Mapping flags
69  * @ret rc Return status code
70  */
71  int ( * map ) ( struct dma_device *dma, struct dma_mapping *map,
72  void *addr, size_t len, int flags );
73  /**
74  * Unmap buffer
75  *
76  * @v dma DMA device
77  * @v map DMA mapping
78  * @v len Used length
79  */
80  void ( * unmap ) ( struct dma_device *dma, struct dma_mapping *map,
81  size_t len );
82  /**
83  * Allocate and map DMA-coherent buffer
84  *
85  * @v dma DMA device
86  * @v map DMA mapping to fill in
87  * @v len Length of buffer
88  * @v align Physical alignment
89  * @ret addr Buffer address, or NULL on error
90  */
91  void * ( * alloc ) ( struct dma_device *dma, struct dma_mapping *map,
92  size_t len, size_t align );
93  /**
94  * Unmap and free DMA-coherent buffer
95  *
96  * @v dma DMA device
97  * @v map DMA mapping
98  * @v addr Buffer address
99  * @v len Length of buffer
100  */
101  void ( * free ) ( struct dma_device *dma, struct dma_mapping *map,
102  void *addr, size_t len );
103  /**
104  * Allocate and map DMA-coherent buffer from external (user) memory
105  *
106  * @v dma DMA device
107  * @v map DMA mapping to fill in
108  * @v len Length of buffer
109  * @v align Physical alignment
110  * @ret addr Buffer address, or NULL on error
111  */
112  void * ( * umalloc ) ( struct dma_device *dma,
113  struct dma_mapping *map,
114  size_t len, size_t align );
115  /**
116  * Unmap and free DMA-coherent buffer from external (user) memory
117  *
118  * @v dma DMA device
119  * @v map DMA mapping
120  * @v addr Buffer address
121  * @v len Length of buffer
122  */
123  void ( * ufree ) ( struct dma_device *dma, struct dma_mapping *map,
124  void *addr, size_t len );
125  /**
126  * Set addressable space mask
127  *
128  * @v dma DMA device
129  * @v mask Addressable space mask
130  */
131  void ( * set_mask ) ( struct dma_device *dma, physaddr_t mask );
132 };
133 
/* Mapping flags (passed in the "flags" parameter of dma_map()) */

/** Device will read data from host memory */
#define DMA_TX 0x01

/** Device will write data to host memory */
#define DMA_RX 0x02

/** Device will both read data from and write data to host memory */
#define DMA_BI ( DMA_TX | DMA_RX )
142 
/**
 * Calculate static inline DMA I/O API function name
 *
 * @v _subsys		Subsystem prefix
 * @v _api_func		API function
 * @ret _subsys_func	Subsystem API function
 */
#define DMAAPI_INLINE( _subsys, _api_func ) \
	SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func )

/**
 * Provide a DMA I/O API implementation
 *
 * @v _subsys		Subsystem prefix
 * @v _api_func		API function
 * @v _func		Implementing function
 */
#define PROVIDE_DMAAPI( _subsys, _api_func, _func ) \
	PROVIDE_SINGLE_API ( DMAAPI_PREFIX_ ## _subsys, _api_func, _func )

/**
 * Provide a static inline DMA I/O API implementation
 *
 * @v _subsys		Subsystem prefix
 * @v _api_func		API function
 */
#define PROVIDE_DMAAPI_INLINE( _subsys, _api_func ) \
	PROVIDE_SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func )
171 
172 /**
173  * Map buffer for DMA
174  *
175  * @v dma DMA device
176  * @v map DMA mapping to fill in
177  * @v addr Buffer address
178  * @v len Length of buffer
179  * @v flags Mapping flags
180  * @ret rc Return status code
181  */
182 static inline __always_inline int
183 DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma,
184  struct dma_mapping *map, void *addr __unused,
185  size_t len __unused, int flags __unused ) {
186 
187  /* Increment mapping count (for debugging) */
188  if ( DBG_LOG ) {
189  map->dma = dma;
190  dma->mapped++;
191  }
192 
193  return 0;
194 }
195 
196 /**
197  * Unmap buffer
198  *
199  * @v map DMA mapping
200  * @v len Used length
201  */
202 static inline __always_inline void
203 DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_mapping *map,
204  size_t len __unused ) {
205 
206  /* Decrement mapping count (for debugging) */
207  if ( DBG_LOG ) {
208  assert ( map->dma != NULL );
209  map->dma->mapped--;
210  map->dma = NULL;
211  }
212 }
213 
214 /**
215  * Allocate and map DMA-coherent buffer
216  *
217  * @v dma DMA device
218  * @v map DMA mapping to fill in
219  * @v len Length of buffer
220  * @v align Physical alignment
221  * @ret addr Buffer address, or NULL on error
222  */
223 static inline __always_inline void *
224 DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma,
225  struct dma_mapping *map,
226  size_t len, size_t align ) {
227  void *addr;
228 
229  /* Allocate buffer */
230  addr = malloc_phys ( len, align );
231 
232  /* Increment mapping count (for debugging) */
233  if ( DBG_LOG && addr ) {
234  map->dma = dma;
235  dma->mapped++;
236  }
237 
238  return addr;
239 }
240 
241 /**
242  * Unmap and free DMA-coherent buffer
243  *
244  * @v map DMA mapping
245  * @v addr Buffer address
246  * @v len Length of buffer
247  */
248 static inline __always_inline void
249 DMAAPI_INLINE ( flat, dma_free ) ( struct dma_mapping *map,
250  void *addr, size_t len ) {
251 
252  /* Free buffer */
253  free_phys ( addr, len );
254 
255  /* Decrement mapping count (for debugging) */
256  if ( DBG_LOG ) {
257  assert ( map->dma != NULL );
258  map->dma->mapped--;
259  map->dma = NULL;
260  }
261 }
262 
263 /**
264  * Allocate and map DMA-coherent buffer from external (user) memory
265  *
266  * @v dma DMA device
267  * @v map DMA mapping to fill in
268  * @v len Length of buffer
269  * @v align Physical alignment
270  * @ret addr Buffer address, or NULL on error
271  */
272 static inline __always_inline void *
273 DMAAPI_INLINE ( flat, dma_umalloc ) ( struct dma_device *dma,
274  struct dma_mapping *map,
275  size_t len, size_t align __unused ) {
276  void *addr;
277 
278  /* Allocate buffer */
279  addr = umalloc ( len );
280 
281  /* Increment mapping count (for debugging) */
282  if ( DBG_LOG && addr ) {
283  map->dma = dma;
284  dma->mapped++;
285  }
286 
287  return addr;
288 }
289 
290 /**
291  * Unmap and free DMA-coherent buffer from external (user) memory
292  *
293  * @v map DMA mapping
294  * @v addr Buffer address
295  * @v len Length of buffer
296  */
297 static inline __always_inline void
298 DMAAPI_INLINE ( flat, dma_ufree ) ( struct dma_mapping *map,
299  void *addr, size_t len __unused ) {
300 
301  /* Free buffer */
302  ufree ( addr );
303 
304  /* Decrement mapping count (for debugging) */
305  if ( DBG_LOG ) {
306  assert ( map->dma != NULL );
307  map->dma->mapped--;
308  map->dma = NULL;
309  }
310 }
311 
312 /**
313  * Set addressable space mask
314  *
315  * @v dma DMA device
316  * @v mask Addressable space mask
317  */
318 static inline __always_inline void
319 DMAAPI_INLINE ( flat, dma_set_mask ) ( struct dma_device *dma __unused,
321 
322  /* Nothing to do */
323 }
324 
325 /**
326  * Get DMA address from virtual address
327  *
328  * @v map DMA mapping
329  * @v addr Address within the mapped region
330  * @ret addr Device-side DMA address
331  */
332 static inline __always_inline physaddr_t
333 DMAAPI_INLINE ( flat, dma ) ( struct dma_mapping *map __unused, void *addr ) {
334 
335  /* Use physical address as device address */
336  return virt_to_phys ( addr );
337 }
338 
339 /**
340  * Get DMA address from virtual address
341  *
342  * @v map DMA mapping
343  * @v addr Address within the mapped region
344  * @ret addr Device-side DMA address
345  */
346 static inline __always_inline physaddr_t
347 DMAAPI_INLINE ( op, dma ) ( struct dma_mapping *map, void *addr ) {
348 
349  /* Adjust physical address using mapping offset */
350  return ( virt_to_phys ( addr ) + map->offset );
351 }
352 
353 /* Include all architecture-dependent DMA API headers */
354 #include <bits/dma.h>
355 
/* Public DMA API entry points (implemented by the selected DMA API
 * backend via PROVIDE_DMAAPI / PROVIDE_DMAAPI_INLINE) */

/**
 * Map buffer for DMA
 *
 * @v dma		DMA device
 * @v map		DMA mapping to fill in
 * @v addr		Buffer address
 * @v len		Length of buffer
 * @v flags		Mapping flags
 * @ret rc		Return status code
 */
int dma_map ( struct dma_device *dma, struct dma_mapping *map,
	      void *addr, size_t len, int flags );

/**
 * Unmap buffer
 *
 * @v map		DMA mapping
 * @v len		Used length
 */
void dma_unmap ( struct dma_mapping *map, size_t len );

/**
 * Allocate and map DMA-coherent buffer
 *
 * @v dma		DMA device
 * @v map		DMA mapping to fill in
 * @v len		Length of buffer
 * @v align		Physical alignment
 * @ret addr		Buffer address, or NULL on error
 */
void * dma_alloc ( struct dma_device *dma, struct dma_mapping *map,
		   size_t len, size_t align );

/**
 * Unmap and free DMA-coherent buffer
 *
 * @v map		DMA mapping
 * @v addr		Buffer address
 * @v len		Length of buffer
 */
void dma_free ( struct dma_mapping *map, void *addr, size_t len );

/**
 * Allocate and map DMA-coherent buffer from external (user) memory
 *
 * @v dma		DMA device
 * @v map		DMA mapping to fill in
 * @v len		Length of buffer
 * @v align		Physical alignment
 * @ret addr		Buffer address, or NULL on error
 */
void * dma_umalloc ( struct dma_device *dma, struct dma_mapping *map,
		     size_t len, size_t align );

/**
 * Unmap and free DMA-coherent buffer from external (user) memory
 *
 * @v map		DMA mapping
 * @v addr		Buffer address
 * @v len		Length of buffer
 */
void dma_ufree ( struct dma_mapping *map, void *addr, size_t len );

/**
 * Set addressable space mask
 *
 * @v dma		DMA device
 * @v mask		Addressable space mask
 */
void dma_set_mask ( struct dma_device *dma, physaddr_t mask );

/**
 * Get DMA address from virtual address
 *
 * @v map		DMA mapping
 * @v addr		Address within the mapped region
 * @ret addr		Device-side DMA address
 */
physaddr_t dma ( struct dma_mapping *map, void *addr );
436 /**
437  * Check if DMA unmapping is required
438  *
439  * @v map DMA mapping
440  * @v unmap Unmapping is required
441  */
442 static inline __always_inline int dma_mapped ( struct dma_mapping *map ) {
443 
444  /* Unmapping is required if a DMA device was recorded */
445  return ( map->dma != NULL );
446 }
447 
448 /**
449  * Initialise DMA device
450  *
451  * @v dma DMA device
452  * @v op DMA operations
453  */
454 static inline __always_inline void dma_init ( struct dma_device *dma,
455  struct dma_operations *op ) {
456 
457  /* Set operations table */
458  dma->op = op;
459 }
460 
461 /**
462  * Set 64-bit addressable space mask
463  *
464  * @v dma DMA device
465  */
466 static inline __always_inline void
468 
469  /* Set mask to maximum physical address */
470  dma_set_mask ( dma, ~( ( physaddr_t ) 0 ) );
471 }
472 
473 #endif /* _IPXE_DMA_H */
static __always_inline void ufree(void *ptr)
Free external memory.
Definition: umalloc.h:68
iPXE I/O API
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
void(* ufree)(struct dma_device *dma, struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer from external (user) memory.
Definition: dma.h:123
iPXE internal APIs
struct dma_device * dma
DMA device (if unmapping is required)
Definition: dma.h:42
void dma_unmap(struct dma_mapping *map, size_t len)
Unmap buffer.
static __always_inline int struct dma_mapping void *addr __unused
Definition: dma.h:184
struct dma_operations * op
DMA operations.
Definition: dma.h:50
static __always_inline void dma_set_mask_64bit(struct dma_device *dma)
Set 64-bit addressable space mask.
Definition: dma.h:467
FILE_SECBOOT(PERMITTED)
void dma_set_mask(struct dma_device *dma, physaddr_t mask)
Set addressable space mask.
Dynamic memory allocation.
Dummy architecture-specific DMA API implementations.
physaddr_t mask
Addressable space mask.
Definition: dma.h:52
void dma_free(struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer.
#define DMAAPI_INLINE(_subsys, _api_func)
Calculate static inline DMA I/O API function name.
Definition: dma.h:150
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
ring len
Length.
Definition: dwmac.h:231
static __always_inline int dma_mapped(struct dma_mapping *map)
Check if DMA unmapping is required.
Definition: dma.h:442
void(* unmap)(struct dma_device *dma, struct dma_mapping *map, size_t len)
Unmap buffer.
Definition: dma.h:80
uint8_t flags
Flags.
Definition: ena.h:18
physaddr_t offset
Address offset.
Definition: dma.h:40
void * dma_umalloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer from external (user) memory.
void(* set_mask)(struct dma_device *dma, physaddr_t mask)
Set addressable space mask.
Definition: dma.h:131
#define __always_inline
Declare a function to be always inline.
Definition: compiler.h:611
uint32_t addr
Buffer address.
Definition: dwmac.h:20
User memory allocation.
DMA operations.
Definition: dma.h:60
void dma_ufree(struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer from external (user) memory.
void * dma_alloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer.
unsigned int mapped
Total number of mappings (for debugging)
Definition: dma.h:54
unsigned long physaddr_t
Definition: stdint.h:20
static uint16_t struct vmbus_xfer_pages_operations * op
Definition: netvsc.h:327
unsigned int allocated
Total number of allocations (for debugging)
Definition: dma.h:56
I/O API configuration.
static __always_inline void dma_init(struct dma_device *dma, struct dma_operations *op)
Initialise DMA device.
Definition: dma.h:454
static __always_inline int struct dma_mapping * map
Definition: dma.h:184
int(* map)(struct dma_device *dma, struct dma_mapping *map, void *addr, size_t len, int flags)
Map buffer for DMA.
Definition: dma.h:71
static __always_inline void * umalloc(size_t size)
Allocate external memory.
Definition: umalloc.h:57
void * token
Platform mapping token.
Definition: dma.h:44
void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.c:723
void(* free)(struct dma_device *dma, struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer.
Definition: dma.h:101
#define DBG_LOG
Definition: compiler.h:317
A DMA mapping.
Definition: dma.h:33
#define NULL
NULL pointer (VOID *)
Definition: Base.h:322
physaddr_t dma(struct dma_mapping *map, void *addr)
Get DMA address from virtual address.
void * malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.c:707
A DMA-capable device.
Definition: dma.h:48