iPXE
dma.h
1 #ifndef _IPXE_DMA_H
2 #define _IPXE_DMA_H
3 
4 /** @file
5  *
6  * DMA mappings
7  *
8  */
9 
10 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
11 
12 #include <stdint.h>
13 #include <ipxe/api.h>
14 #include <ipxe/io.h>
15 #include <ipxe/malloc.h>
16 #include <ipxe/umalloc.h>
17 #include <config/ioapi.h>
18 
19 #ifdef DMAAPI_OP
20 #define DMAAPI_PREFIX_op
21 #else
22 #define DMAAPI_PREFIX_op __op_
23 #endif
24 
25 #ifdef DMAAPI_FLAT
26 #define DMAAPI_PREFIX_flat
27 #else
28 #define DMAAPI_PREFIX_flat __flat_
29 #endif
30 
31 /** A DMA mapping */
32 struct dma_mapping {
33  /** Address offset
34  *
35  * This is the value that must be added to a physical address
36  * within the mapping in order to produce the corresponding
37  * device-side DMA address.
38  */
39  physaddr_t offset;
40  /** DMA device (if unmapping is required) */
41  struct dma_device *dma;
42  /** Platform mapping token */
43  void *token;
44 };
45 
46 /** A DMA-capable device */
47 struct dma_device {
48  /** DMA operations */
49  struct dma_operations *op;
50  /** Addressable space mask */
51  physaddr_t mask;
52  /** Total number of mappings (for debugging) */
53  unsigned int mapped;
54  /** Total number of allocations (for debugging) */
55  unsigned int allocated;
56 };
57 
58 /** DMA operations */
59 struct dma_operations {
60  /**
61  * Map buffer for DMA
62  *
63  * @v dma DMA device
64  * @v map DMA mapping to fill in
65  * @v addr Buffer address
66  * @v len Length of buffer
67  * @v flags Mapping flags
68  * @ret rc Return status code
69  */
70  int ( * map ) ( struct dma_device *dma, struct dma_mapping *map,
71  void *addr, size_t len, int flags );
72  /**
73  * Unmap buffer
74  *
75  * @v dma DMA device
76  * @v map DMA mapping
77  * @v len Used length
78  */
79  void ( * unmap ) ( struct dma_device *dma, struct dma_mapping *map,
80  size_t len );
81  /**
82  * Allocate and map DMA-coherent buffer
83  *
84  * @v dma DMA device
85  * @v map DMA mapping to fill in
86  * @v len Length of buffer
87  * @v align Physical alignment
88  * @ret addr Buffer address, or NULL on error
89  */
90  void * ( * alloc ) ( struct dma_device *dma, struct dma_mapping *map,
91  size_t len, size_t align );
92  /**
93  * Unmap and free DMA-coherent buffer
94  *
95  * @v dma DMA device
96  * @v map DMA mapping
97  * @v addr Buffer address
98  * @v len Length of buffer
99  */
100  void ( * free ) ( struct dma_device *dma, struct dma_mapping *map,
101  void *addr, size_t len );
102  /**
103  * Allocate and map DMA-coherent buffer from external (user) memory
104  *
105  * @v dma DMA device
106  * @v map DMA mapping to fill in
107  * @v len Length of buffer
108  * @v align Physical alignment
109  * @ret addr Buffer address, or NULL on error
110  */
111  void * ( * umalloc ) ( struct dma_device *dma,
112  struct dma_mapping *map,
113  size_t len, size_t align );
114  /**
115  * Unmap and free DMA-coherent buffer from external (user) memory
116  *
117  * @v dma DMA device
118  * @v map DMA mapping
119  * @v addr Buffer address
120  * @v len Length of buffer
121  */
122  void ( * ufree ) ( struct dma_device *dma, struct dma_mapping *map,
123  void *addr, size_t len );
124  /**
125  * Set addressable space mask
126  *
127  * @v dma DMA device
128  * @v mask Addressable space mask
129  */
130  void ( * set_mask ) ( struct dma_device *dma, physaddr_t mask );
131 };
132 
133 /** Device will read data from host memory */
134 #define DMA_TX 0x01
135 
136 /** Device will write data to host memory */
137 #define DMA_RX 0x02
138 
139 /** Device will both read data from and write data to host memory */
140 #define DMA_BI ( DMA_TX | DMA_RX )
141 
142 /**
143  * Calculate static inline DMA I/O API function name
144  *
145  * @v _subsys Subsystem prefix
146  * @v _api_func API function
147  * @ret _subsys_func Subsystem API function
148  */
149 #define DMAAPI_INLINE( _subsys, _api_func ) \
150  SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func )
151 
152 /**
153  * Provide a DMA I/O API implementation
154  *
155  * @v _subsys Subsystem prefix
156  * @v _api_func API function
157  * @v _func Implementing function
158  */
159 #define PROVIDE_DMAAPI( _subsys, _api_func, _func ) \
160  PROVIDE_SINGLE_API ( DMAAPI_PREFIX_ ## _subsys, _api_func, _func )
161 
162 /**
163  * Provide a static inline DMA I/O API implementation
164  *
165  * @v _subsys Subsystem prefix
166  * @v _api_func API function
167  */
168 #define PROVIDE_DMAAPI_INLINE( _subsys, _api_func ) \
169  PROVIDE_SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func )
170 
171 /**
172  * Map buffer for DMA
173  *
174  * @v dma DMA device
175  * @v map DMA mapping to fill in
176  * @v addr Buffer address
177  * @v len Length of buffer
178  * @v flags Mapping flags
179  * @ret rc Return status code
180  */
181 static inline __always_inline int
182 DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma,
183  struct dma_mapping *map, void *addr __unused,
184  size_t len __unused, int flags __unused ) {
185 
186  /* Increment mapping count (for debugging) */
187  if ( DBG_LOG ) {
188  map->dma = dma;
189  dma->mapped++;
190  }
191 
192  return 0;
193 }
194 
195 /**
196  * Unmap buffer
197  *
198  * @v map DMA mapping
199  * @v len Used length
200  */
201 static inline __always_inline void
202 DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_mapping *map,
203  size_t len __unused ) {
204 
205  /* Decrement mapping count (for debugging) */
206  if ( DBG_LOG ) {
207  assert ( map->dma != NULL );
208  map->dma->mapped--;
209  map->dma = NULL;
210  }
211 }
212 
213 /**
214  * Allocate and map DMA-coherent buffer
215  *
216  * @v dma DMA device
217  * @v map DMA mapping to fill in
218  * @v len Length of buffer
219  * @v align Physical alignment
220  * @ret addr Buffer address, or NULL on error
221  */
222 static inline __always_inline void *
223 DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma,
224  struct dma_mapping *map,
225  size_t len, size_t align ) {
226  void *addr;
227 
228  /* Allocate buffer */
229  addr = malloc_phys ( len, align );
230 
231  /* Increment mapping count (for debugging) */
232  if ( DBG_LOG && addr ) {
233  map->dma = dma;
234  dma->mapped++;
235  }
236 
237  return addr;
238 }
239 
240 /**
241  * Unmap and free DMA-coherent buffer
242  *
243  * @v map DMA mapping
244  * @v addr Buffer address
245  * @v len Length of buffer
246  */
247 static inline __always_inline void
248 DMAAPI_INLINE ( flat, dma_free ) ( struct dma_mapping *map,
249  void *addr, size_t len ) {
250 
251  /* Free buffer */
252  free_phys ( addr, len );
253 
254  /* Decrement mapping count (for debugging) */
255  if ( DBG_LOG ) {
256  assert ( map->dma != NULL );
257  map->dma->mapped--;
258  map->dma = NULL;
259  }
260 }
261 
262 /**
263  * Allocate and map DMA-coherent buffer from external (user) memory
264  *
265  * @v dma DMA device
266  * @v map DMA mapping to fill in
267  * @v len Length of buffer
268  * @v align Physical alignment
269  * @ret addr Buffer address, or NULL on error
270  */
271 static inline __always_inline void *
272 DMAAPI_INLINE ( flat, dma_umalloc ) ( struct dma_device *dma,
273  struct dma_mapping *map,
274  size_t len, size_t align __unused ) {
275  void *addr;
276 
277  /* Allocate buffer */
278  addr = umalloc ( len );
279 
280  /* Increment mapping count (for debugging) */
281  if ( DBG_LOG && addr ) {
282  map->dma = dma;
283  dma->mapped++;
284  }
285 
286  return addr;
287 }
288 
289 /**
290  * Unmap and free DMA-coherent buffer from external (user) memory
291  *
292  * @v map DMA mapping
293  * @v addr Buffer address
294  * @v len Length of buffer
295  */
296 static inline __always_inline void
297 DMAAPI_INLINE ( flat, dma_ufree ) ( struct dma_mapping *map,
298  void *addr, size_t len __unused ) {
299 
300  /* Free buffer */
301  ufree ( addr );
302 
303  /* Decrement mapping count (for debugging) */
304  if ( DBG_LOG ) {
305  assert ( map->dma != NULL );
306  map->dma->mapped--;
307  map->dma = NULL;
308  }
309 }
310 
311 /**
312  * Set addressable space mask
313  *
314  * @v dma DMA device
315  * @v mask Addressable space mask
316  */
317 static inline __always_inline void
318 DMAAPI_INLINE ( flat, dma_set_mask ) ( struct dma_device *dma __unused,
319  physaddr_t mask __unused ) {
320 
321  /* Nothing to do */
322 }
323 
324 /**
325  * Get DMA address from virtual address
326  *
327  * @v map DMA mapping
328  * @v addr Address within the mapped region
329  * @ret addr Device-side DMA address
330  */
331 static inline __always_inline physaddr_t
332 DMAAPI_INLINE ( flat, dma ) ( struct dma_mapping *map __unused, void *addr ) {
333 
334  /* Use physical address as device address */
335  return virt_to_phys ( addr );
336 }
337 
338 /**
339  * Get DMA address from virtual address
340  *
341  * @v map DMA mapping
342  * @v addr Address within the mapped region
343  * @ret addr Device-side DMA address
344  */
345 static inline __always_inline physaddr_t
346 DMAAPI_INLINE ( op, dma ) ( struct dma_mapping *map, void *addr ) {
347 
348  /* Adjust physical address using mapping offset */
349  return ( virt_to_phys ( addr ) + map->offset );
350 }
351 
352 /* Include all architecture-dependent DMA API headers */
353 #include <bits/dma.h>
354 
355 /**
356  * Map buffer for DMA
357  *
358  * @v dma DMA device
359  * @v map DMA mapping to fill in
360  * @v addr Buffer address
361  * @v len Length of buffer
362  * @v flags Mapping flags
363  * @ret rc Return status code
364  */
365 int dma_map ( struct dma_device *dma, struct dma_mapping *map,
366  void *addr, size_t len, int flags );
367 
368 /**
369  * Unmap buffer
370  *
371  * @v map DMA mapping
372  * @v len Used length
373  */
374 void dma_unmap ( struct dma_mapping *map, size_t len );
375 
376 /**
377  * Allocate and map DMA-coherent buffer
378  *
379  * @v dma DMA device
380  * @v map DMA mapping to fill in
381  * @v len Length of buffer
382  * @v align Physical alignment
383  * @ret addr Buffer address, or NULL on error
384  */
385 void * dma_alloc ( struct dma_device *dma, struct dma_mapping *map,
386  size_t len, size_t align );
387 
388 /**
389  * Unmap and free DMA-coherent buffer
390  *
391  * @v map DMA mapping
392  * @v addr Buffer address
393  * @v len Length of buffer
394  */
395 void dma_free ( struct dma_mapping *map, void *addr, size_t len );
396 
397 /**
398  * Allocate and map DMA-coherent buffer from external (user) memory
399  *
400  * @v dma DMA device
401  * @v map DMA mapping to fill in
402  * @v len Length of buffer
403  * @v align Physical alignment
404  * @ret addr Buffer address, or NULL on error
405  */
406 void * dma_umalloc ( struct dma_device *dma, struct dma_mapping *map,
407  size_t len, size_t align );
408 
409 /**
410  * Unmap and free DMA-coherent buffer from external (user) memory
411  *
412  * @v map DMA mapping
413  * @v addr Buffer address
414  * @v len Length of buffer
415  */
416 void dma_ufree ( struct dma_mapping *map, void *addr, size_t len );
417 
418 /**
419  * Set addressable space mask
420  *
421  * @v dma DMA device
422  * @v mask Addressable space mask
423  */
424 void dma_set_mask ( struct dma_device *dma, physaddr_t mask );
425 
426 /**
427  * Get DMA address from virtual address
428  *
429  * @v map DMA mapping
430  * @v addr Address within the mapped region
431  * @ret addr Device-side DMA address
432  */
433 physaddr_t dma ( struct dma_mapping *map, void *addr );
434 
435 /**
436  * Check if DMA unmapping is required
437  *
438  * @v map DMA mapping
439  * @ret unmap Unmapping is required
440  */
441 static inline __always_inline int dma_mapped ( struct dma_mapping *map ) {
442 
443  /* Unmapping is required if a DMA device was recorded */
444  return ( map->dma != NULL );
445 }
446 
447 /**
448  * Initialise DMA device
449  *
450  * @v dma DMA device
451  * @v op DMA operations
452  */
453 static inline __always_inline void dma_init ( struct dma_device *dma,
454  struct dma_operations *op ) {
455 
456  /* Set operations table */
457  dma->op = op;
458 }
459 
460 /**
461  * Set 64-bit addressable space mask
462  *
463  * @v dma DMA device
464  */
465 static inline __always_inline void
466 dma_set_mask_64bit ( struct dma_device *dma ) {
467 
468  /* Set mask to maximum physical address */
469  dma_set_mask ( dma, ~( ( physaddr_t ) 0 ) );
470 }
471 
472 #endif /* _IPXE_DMA_H */