iPXE
memory.h
/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * memory.h
 *
 * Memory reservation and information.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_MEMORY_H__
#define __XEN_PUBLIC_MEMORY_H__

FILE_LICENCE ( MIT );

#include "xen.h"
#include "physdev.h"

/*
 * Increase or decrease the specified domain's memory reservation. Returns the
 * number of extents successfully allocated or freed.
 * arg == addr of struct xen_memory_reservation.
 */
#define XENMEM_increase_reservation 0
#define XENMEM_decrease_reservation 1
#define XENMEM_populate_physmap 6

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
/*
 * Maximum # bits addressable by the user of the allocated region (e.g., I/O
 * devices often have a 32-bit limitation even in 64-bit systems). If zero
 * then the user has no addressing restriction. This field is not used by
 * XENMEM_decrease_reservation.
 */
#define XENMEMF_address_bits(x) (x)
#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
/* NUMA node to allocate from. */
#define XENMEMF_node(x) (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
/* Flag to populate physmap with populate-on-demand entries */
#define XENMEMF_populate_on_demand (1<<16)
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
/* Flag to indicate the node specified is virtual node */
#define XENMEMF_vnode (1<<18)
#endif

struct xen_memory_reservation {

    /*
     * XENMEM_increase_reservation:
     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
     * XENMEM_decrease_reservation:
     *   IN:  GMFN bases of extents to free
     * XENMEM_populate_physmap:
     *   IN:  GPFN bases of extents to populate with memory
     *   OUT: GMFN bases of extents that were allocated
     *   (NB. This command also updates the mach_to_phys translation table)
     * XENMEM_claim_pages:
     *   IN: must be zero
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /* Number of extents, and size/alignment of each (2^extent_order pages). */
    xen_ulong_t  nr_extents;
    unsigned int extent_order;

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
    /* XENMEMF flags. */
    unsigned int mem_flags;
#else
    unsigned int address_bits;
#endif

    /*
     * Domain whose reservation is being changed.
     * Unprivileged domains can specify only DOMID_SELF.
     */
    domid_t      domid;
};
typedef struct xen_memory_reservation xen_memory_reservation_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);

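/*
 * Illustrative sketch (not part of the Xen ABI): a guest might populate a
 * single 4kB page at a chosen GPFN roughly as follows.  The
 * HYPERVISOR_memory_op() and set_xen_guest_handle() helpers are assumed to
 * come from the platform's hypercall and guest-handle support code, not
 * from this header.
 *
 *     xen_pfn_t gpfn = new_page_gpfn;
 *     struct xen_memory_reservation reservation = {
 *         .nr_extents   = 1,
 *         .extent_order = 0,              // one 4kB page
 *         .domid        = DOMID_SELF,
 *     };
 *     set_xen_guest_handle(reservation.extent_start, &gpfn);
 *     rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
 *     // rc is the number of extents actually populated (1 on success)
 */
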
/*
 * An atomic exchange of memory pages. If return code is zero then
 * @out.extent_list provides GMFNs of the newly-allocated memory.
 * Returns zero on complete success, otherwise a negative error code.
 * On complete success then always @nr_exchanged == @in.nr_extents.
 * On partial success @nr_exchanged indicates how much work was done.
 *
 * Note that only PV guests can use this operation.
 */
#define XENMEM_exchange 11
struct xen_memory_exchange {
    /*
     * [IN] Details of memory extents to be exchanged (GMFN bases).
     * Note that @in.address_bits is ignored and unused.
     */
    struct xen_memory_reservation in;

    /*
     * [IN/OUT] Details of new memory extents.
     * We require that:
     *  1. @in.domid == @out.domid
     *  2. @in.nr_extents  << @in.extent_order ==
     *     @out.nr_extents << @out.extent_order
     *  3. @in.extent_start and @out.extent_start lists must not overlap
     *  4. @out.extent_start lists GPFN bases to be populated
     *  5. @out.extent_start is overwritten with allocated GMFN bases
     */
    struct xen_memory_reservation out;

    /*
     * [OUT] Number of input extents that were successfully exchanged:
     *  1. The first @nr_exchanged input extents were successfully
     *     deallocated.
     *  2. The corresponding first entries in the output extent list correctly
     *     indicate the GMFNs that were successfully exchanged.
     *  3. All other input and output extents are untouched.
     *  4. If not all input extents are exchanged then the return code of this
     *     command will be non-zero.
     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
     */
    xen_ulong_t nr_exchanged;
};
typedef struct xen_memory_exchange xen_memory_exchange_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);

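/*
 * Illustrative sketch (not part of the Xen ABI): a PV guest could exchange
 * one of its pages for a replacement below 4GB, e.g. for a 32-bit DMA
 * buffer.  Condition 2 above is satisfied because both sides describe
 * exactly one order-0 extent.  Helper names are assumptions, as in the
 * earlier sketch.
 *
 *     struct xen_memory_exchange xchg = {
 *         .in  = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF },
 *         .out = { .nr_extents = 1, .extent_order = 0,
 *                  .mem_flags  = XENMEMF_address_bits(32),
 *                  .domid      = DOMID_SELF },
 *         .nr_exchanged = 0,              // must be zero on entry
 *     };
 *     set_xen_guest_handle(xchg.in.extent_start, &old_gmfn);
 *     set_xen_guest_handle(xchg.out.extent_start, &new_gpfn);
 *     rc = HYPERVISOR_memory_op(XENMEM_exchange, &xchg);
 */
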
/*
 * Returns the maximum machine frame number of mapped RAM in this system.
 * This command always succeeds (it never returns an error code).
 * arg == NULL.
 */
#define XENMEM_maximum_ram_page 2

struct xen_memory_domain {
    /* [IN] Domain information is being queried for. */
    domid_t domid;
};

/*
 * Returns the current or maximum memory reservation, in pages, of the
 * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
 * arg == addr of struct xen_memory_domain.
 */
#define XENMEM_current_reservation 3
#define XENMEM_maximum_reservation 4

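/*
 * Illustrative sketch (not part of the Xen ABI): querying the caller's own
 * current reservation, in pages.  HYPERVISOR_memory_op() is an assumed
 * hypercall wrapper.
 *
 *     struct xen_memory_domain dom = { .domid = DOMID_SELF };
 *     long pages = HYPERVISOR_memory_op(XENMEM_current_reservation, &dom);
 *     // a negative return value is an error code
 */
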
/*
 * Returns the maximum GFN in use by the specified domain (may be DOMID_SELF).
 * Returns -ve errcode on failure.
 * arg == addr of struct xen_memory_domain.
 */
#define XENMEM_maximum_gpfn 14

/*
 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
 * mapping table. Architectures which do not have a m2p table do not implement
 * this command.
 * arg == addr of xen_machphys_mfn_list_t.
 */
#define XENMEM_machphys_mfn_list 5
struct xen_machphys_mfn_list {
    /*
     * Size of the 'extent_start' array. Fewer entries will be filled if the
     * machphys table is smaller than max_extents * 2MB.
     */
    unsigned int max_extents;

    /*
     * Pointer to buffer to fill with list of extent starts. If there are
     * any large discontiguities in the machine address space, 2MB gaps in
     * the machphys table will be represented by an MFN base of zero.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /*
     * Number of extents written to the above array. This will be smaller
     * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
     */
    unsigned int nr_extents;
};
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);

/*
 * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
 *
 * For a non compat caller, this functions similarly to
 * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
 * m2p table.
 */
#define XENMEM_machphys_compat_mfn_list 25

/*
 * Returns the location in virtual address space of the machine_to_phys
 * mapping table. Architectures which do not have a m2p table, or which do not
 * map it by default into guest address space, do not implement this command.
 * arg == addr of xen_machphys_mapping_t.
 */
#define XENMEM_machphys_mapping 12
struct xen_machphys_mapping {
    xen_ulong_t v_start, v_end; /* Start and end virtual addresses.   */
    xen_ulong_t max_mfn;        /* Maximum MFN that can be looked up. */
};
typedef struct xen_machphys_mapping xen_machphys_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);

/* Source mapping space. */
/* ` enum phys_map_space { */
#define XENMAPSPACE_shared_info  0 /* shared info page */
#define XENMAPSPACE_grant_table  1 /* grant table page */
#define XENMAPSPACE_gmfn         2 /* GMFN */
#define XENMAPSPACE_gmfn_range   3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
                                    * XENMEM_add_to_physmap_batch only. */
#define XENMAPSPACE_dev_mmio     5 /* device mmio region
                                      ARM only; the region is mapped in
                                      Stage-2 using the Normal Memory
                                      Inner/Outer Write-Back Cacheable
                                      memory attribute. */
/* ` } */

/*
 * Sets the GPFN at which a particular page appears in the specified guest's
 * physical address space (translated guests only).
 * arg == addr of xen_add_to_physmap_t.
 */
#define XENMEM_add_to_physmap 7
struct xen_add_to_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* Number of pages to go through for gmfn_range */
    uint16_t size;

    unsigned int space; /* => enum phys_map_space */

#define XENMAPIDX_grant_table_status 0x80000000

    /* Index into space being mapped. */
    xen_ulong_t idx;

    /* GPFN in domid where the source mapping page should appear. */
    xen_pfn_t gpfn;
};
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);

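/*
 * Illustrative sketch (not part of the Xen ABI): a translated guest placing
 * its shared info page at a chosen GPFN.  HYPERVISOR_memory_op() is an
 * assumed hypercall wrapper.
 *
 *     struct xen_add_to_physmap xatp = {
 *         .domid = DOMID_SELF,
 *         .space = XENMAPSPACE_shared_info,
 *         .idx   = 0,
 *         .gpfn  = shared_info_gpfn,
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
 */
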
/* A batched version of add_to_physmap. */
#define XENMEM_add_to_physmap_batch 23
struct xen_add_to_physmap_batch {
    /* IN */
    /* Which domain to change the mapping for. */
    domid_t domid;
    uint16_t space; /* => enum phys_map_space */

    /* Number of pages to go through */
    uint16_t size;

#if __XEN_INTERFACE_VERSION__ < 0x00040700
    domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
#else
    union xen_add_to_physmap_batch_extra {
        domid_t foreign_domid; /* gmfn_foreign */
        uint16_t res0;  /* All the other spaces. Should be 0 */
    } u;
#endif

    /* Indexes into space being mapped. */
    XEN_GUEST_HANDLE(xen_ulong_t) idxs;

    /* GPFN in domid where the source mapping page should appear. */
    XEN_GUEST_HANDLE(xen_pfn_t) gpfns;

    /* OUT */

    /* Per index error code. */
    XEN_GUEST_HANDLE(int) errs;
};
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);

#if __XEN_INTERFACE_VERSION__ < 0x00040400
#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
#define xen_add_to_physmap_range xen_add_to_physmap_batch
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
#endif

/*
 * Unmaps the page appearing at a particular GPFN from the specified guest's
 * physical address space (translated guests only).
 * arg == addr of xen_remove_from_physmap_t.
 */
#define XENMEM_remove_from_physmap 15
struct xen_remove_from_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* GPFN of the current mapping of the page. */
    xen_pfn_t gpfn;
};
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);

/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/

/*
 * Returns the pseudo-physical memory map as it was when the domain
 * was started (specified by XENMEM_set_memory_map).
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_memory_map 9
struct xen_memory_map {
    /*
     * On call the number of entries which can be stored in buffer. On
     * return the number of entries which have been stored in
     * buffer.
     */
    unsigned int nr_entries;

    /*
     * Entries in the buffer are in the same format as returned by the
     * BIOS INT 0x15 EAX=0xE820 call.
     */
    XEN_GUEST_HANDLE(void) buffer;
};
typedef struct xen_memory_map xen_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);

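/*
 * Illustrative sketch (not part of the Xen ABI): fetching the
 * pseudo-physical (E820-style) memory map into a caller-provided array.
 * The e820-entry type and helper names are assumptions.
 *
 *     struct e820_entry entries[32];
 *     struct xen_memory_map map = { .nr_entries = 32 };
 *     set_xen_guest_handle(map.buffer, entries);
 *     rc = HYPERVISOR_memory_op(XENMEM_memory_map, &map);
 *     // on success, map.nr_entries holds the number of entries written
 */
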
/*
 * Returns the real physical memory map. Passes the same structure as
 * XENMEM_memory_map.
 * Specifying buffer as NULL will return the number of entries required
 * to store the complete memory map.
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_machine_memory_map 10

/*
 * Set the pseudo-physical memory map of a domain, as returned by
 * XENMEM_memory_map.
 * arg == addr of xen_foreign_memory_map_t.
 */
#define XENMEM_set_memory_map 13
struct xen_foreign_memory_map {
    domid_t domid;
    struct xen_memory_map map;
};
typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);

#define XENMEM_set_pod_target 16
#define XENMEM_get_pod_target 17
struct xen_pod_target {
    /* IN */
    uint64_t target_pages;
    /* OUT */
    uint64_t tot_pages;
    uint64_t pod_cache_pages;
    uint64_t pod_entries;
    /* IN */
    domid_t domid;
};
typedef struct xen_pod_target xen_pod_target_t;

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif

/*
 * Get the number of MFNs saved through memory sharing.
 * The call never fails.
 */
#define XENMEM_get_sharing_freed_pages 18
#define XENMEM_get_sharing_shared_pages 19

#define XENMEM_paging_op 20
#define XENMEM_paging_op_nominate 0
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2

struct xen_mem_paging_op {
    uint8_t op;     /* XENMEM_paging_op_* */
    domid_t domain;

    /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
    XEN_GUEST_HANDLE_64(const_uint8) buffer;
    /* IN: gfn of page being operated on */
    uint64_aligned_t gfn;
};
typedef struct xen_mem_paging_op xen_mem_paging_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);

#define XENMEM_access_op 21
#define XENMEM_access_op_set_access 0
#define XENMEM_access_op_get_access 1
/*
 * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
 * currently unused, but since they have been in use please do not reuse them.
 *
 * #define XENMEM_access_op_enable_emulate 2
 * #define XENMEM_access_op_disable_emulate 3
 */
#define XENMEM_access_op_set_access_multi 4

typedef enum {
    XENMEM_access_n,
    XENMEM_access_r,
    XENMEM_access_w,
    XENMEM_access_rw,
    XENMEM_access_x,
    XENMEM_access_rx,
    XENMEM_access_wx,
    XENMEM_access_rwx,
    /*
     * Page starts off as r-x, but automatically
     * change to r-w on a write
     */
    XENMEM_access_rx2rw,
    /*
     * Log access: starts off as n, automatically
     * goes to rwx, generating an event without
     * pausing the vcpu
     */
    XENMEM_access_n2rwx,
    /* Take the domain default */
    XENMEM_access_default
} xenmem_access_t;

struct xen_mem_access_op {
    /* XENMEM_access_op_* */
    uint8_t op;
    /* xenmem_access_t */
    uint8_t access;
    domid_t domid;
    /*
     * Number of pages for set op (or size of pfn_list for
     * XENMEM_access_op_set_access_multi)
     * Ignored on setting default access and other ops
     */
    uint32_t nr;
    /*
     * First pfn for set op
     * pfn for get op
     * ~0ull is used to set and get the default access for pages
     */
    uint64_aligned_t pfn;
    /*
     * List of pfns to set access for
     * Used only with XENMEM_access_op_set_access_multi
     */
    XEN_GUEST_HANDLE(const_uint64) pfn_list;
    /*
     * Corresponding list of access settings for pfn_list
     * Used only with XENMEM_access_op_set_access_multi
     */
    XEN_GUEST_HANDLE(const_uint8) access_list;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);

#define XENMEM_sharing_op 22
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
#define XENMEM_sharing_op_share 2
#define XENMEM_sharing_op_debug_gfn 3
#define XENMEM_sharing_op_debug_mfn 4
#define XENMEM_sharing_op_debug_gref 5
#define XENMEM_sharing_op_add_physmap 6
#define XENMEM_sharing_op_audit 7
#define XENMEM_sharing_op_range_share 8
#define XENMEM_sharing_op_fork 9
#define XENMEM_sharing_op_fork_reset 10

#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)

/* The following allows sharing of grant refs. This is useful
 * for sharing utilities sitting as "filters" in IO backends
 * (e.g. memshr + blktap(2)). The IO backend is only exposed
 * to grant references, and this allows sharing of the grefs */
#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)

#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
    (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | (val))
#define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
    ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
#define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
    ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))

struct xen_mem_sharing_op {
    uint8_t op;     /* XENMEM_sharing_op_* */
    domid_t domain;

    union {
        struct mem_sharing_op_nominate {  /* OP_NOMINATE_xxx */
            union {
                uint64_aligned_t gfn;     /* IN: gfn to nominate */
                uint32_t grant_ref;       /* IN: grant ref to nominate */
            } u;
            uint64_aligned_t handle;      /* OUT: the handle */
        } nominate;
        struct mem_sharing_op_share {     /* OP_SHARE/ADD_PHYSMAP */
            uint64_aligned_t source_gfn;    /* IN: the gfn of the source page */
            uint64_aligned_t source_handle; /* IN: handle to the source page */
            uint64_aligned_t client_gfn;    /* IN: the client gfn */
            uint64_aligned_t client_handle; /* IN: handle to the client page */
            domid_t client_domain;          /* IN: the client domain id */
        } share;
        struct mem_sharing_op_range {     /* OP_RANGE_SHARE */
            uint64_aligned_t first_gfn;   /* IN: the first gfn */
            uint64_aligned_t last_gfn;    /* IN: the last gfn */
            uint64_aligned_t opaque;      /* Must be set to 0 */
            domid_t client_domain;        /* IN: the client domain id */
            uint16_t _pad[3];             /* Must be set to 0 */
        } range;
        struct mem_sharing_op_debug {     /* OP_DEBUG_xxx */
            union {
                uint64_aligned_t gfn;     /* IN: gfn to debug */
                uint64_aligned_t mfn;     /* IN: mfn to debug */
                uint32_t gref;            /* IN: gref to debug */
            } u;
        } debug;
        struct mem_sharing_op_fork {      /* OP_FORK{,_RESET} */
            domid_t parent_domain;        /* IN: parent's domain id */
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_BLOCK_INTERRUPTS   (1u << 1)
#define XENMEM_FORK_RESET_STATE        (1u << 2)
#define XENMEM_FORK_RESET_MEMORY       (1u << 3)
            uint16_t flags;               /* IN: optional settings */
            uint32_t pad;                 /* Must be set to 0 */
        } fork;
    } u;
};
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);

/*
 * Attempt to stake a claim for a domain on a quantity of pages
 * of system RAM, but _not_ assign specific pageframes. Only
 * arithmetic is performed so the hypercall is very fast and need
 * not be preemptible, thus sidestepping time-of-check-time-of-use
 * races for memory allocation. Returns 0 if the hypervisor page
 * allocator has atomically and successfully claimed the requested
 * number of pages, else non-zero.
 *
 * Any domain may have only one active claim. When sufficient memory
 * has been allocated to resolve the claim, the claim silently expires.
 * Claiming zero pages effectively resets any outstanding claim and
 * is always successful.
 *
 * Note that a valid claim may be staked even after memory has been
 * allocated for a domain. In this case, the claim is not incremental,
 * i.e. if the domain's total page count is 3, and a claim is staked
 * for 10, only 7 additional pages are claimed.
 *
 * Caller must be privileged or the hypercall fails.
 */
#define XENMEM_claim_pages 24

/*
 * XENMEM_claim_pages flags - there are no flags at this time.
 * The zero value is appropriate.
 */

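/*
 * Illustrative sketch (not part of the Xen ABI): a privileged toolstack
 * staking a claim before populating a new domain.  The claim is expressed
 * through struct xen_memory_reservation with extent_start left zero (see
 * "XENMEM_claim_pages: IN: must be zero" above) and no flags set.
 * HYPERVISOR_memory_op() is an assumed hypercall wrapper.
 *
 *     struct xen_memory_reservation claim = {
 *         .nr_extents   = 10,            // total pages, not an increment:
 *         .extent_order = 0,             // a domain already holding 3 pages
 *         .domid        = guest_domid,   // ends up with 7 more claimed
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_claim_pages, &claim);
 *     // rc == 0 iff the claim was staked atomically
 */
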
/*
 * With some legacy devices, certain guest-physical addresses cannot safely
 * be used for other purposes, e.g. to map guest RAM. This hypercall
 * enumerates those regions so the toolstack can avoid using them.
 */
#define XENMEM_reserved_device_memory_map 27
struct xen_reserved_device_memory {
    xen_pfn_t start_pfn;
    xen_ulong_t nr_pages;
};
typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);

struct xen_reserved_device_memory_map {
#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
    /* IN */
    uint32_t flags;
    /*
     * IN/OUT
     *
     * Gets set to the required number of entries when too low,
     * signaled by error code -ERANGE.
     */
    unsigned int nr_entries;
    /* OUT */
    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
    /* IN */
    union {
        struct physdev_pci_device pci_dev;
    } dev;
};
typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

/*
 * Get the pages for a particular guest resource, so that they can be
 * mapped directly by a tools domain.
 */
#define XENMEM_acquire_resource 28
struct xen_mem_acquire_resource {
    /* IN - The domain whose resource is to be mapped */
    domid_t domid;
    /* IN - the type of resource */
    uint16_t type;

#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_grant_table 1
#define XENMEM_resource_vmtrace_buf 2

    /*
     * IN - a type-specific resource identifier, which must be zero
     *      unless stated otherwise.
     *
     * type == XENMEM_resource_ioreq_server -> id == ioreq server id
     * type == XENMEM_resource_grant_table -> id defined below
     */
    uint32_t id;

#define XENMEM_resource_grant_table_id_shared 0
#define XENMEM_resource_grant_table_id_status 1

    /*
     * IN/OUT
     *
     * As an IN parameter number of frames of the resource to be mapped.
     * This value may be updated over the course of the operation.
     *
     * When frame_list is NULL and nr_frames is 0, this is interpreted as a
     * request for the size of the resource, which shall be returned in the
     * nr_frames field.
     *
     * The size of a resource will never be zero, but a nonzero result doesn't
     * guarantee that a subsequent mapping request will be successful. There
     * are further type/id specific constraints which may change between the
     * two calls.
     */
    uint32_t nr_frames;
    /*
     * Padding field, must be zero on input.
     * In a previous version this was an output field with the lowest bit
     * named XENMEM_rsrc_acq_caller_owned. Future versions of this interface
     * will not reuse this bit as an output with the field being zero on
     * input.
     */
    uint32_t pad;
    /*
     * IN - the index of the initial frame to be mapped. This parameter
     *      is ignored if nr_frames is 0. This value may be updated
     *      over the course of the operation.
     */
    uint64_t frame;

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

    /*
     * IN/OUT - If the tools domain is PV then, upon return, frame_list
     *          will be populated with the MFNs of the resource.
     *          If the tools domain is HVM then it is expected that, on
     *          entry, frame_list will be populated with a list of GFNs
     *          that will be mapped to the MFNs of the resource.
     *          If -EIO is returned then the frame_list has only been
     *          partially mapped and it is up to the caller to unmap all
     *          the GFNs.
     *          This parameter may be NULL if nr_frames is 0. This
     *          value may be updated over the course of the operation.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
};
typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);

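/*
 * Illustrative sketch (not part of the Xen ABI): a tools domain asking how
 * many frames make up another domain's shared grant table (the size-query
 * form, with frame_list NULL and nr_frames 0).  HYPERVISOR_memory_op() is
 * an assumed hypercall wrapper.
 *
 *     struct xen_mem_acquire_resource xmar = {
 *         .domid     = guest_domid,
 *         .type      = XENMEM_resource_grant_table,
 *         .id        = XENMEM_resource_grant_table_id_shared,
 *         .nr_frames = 0,
 *     };
 *     rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
 *     // on success, xmar.nr_frames holds the resource size in frames
 */
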
/*
 * XENMEM_get_vnumainfo used by guest to get
 * vNUMA topology from hypervisor.
 */
#define XENMEM_get_vnumainfo 26

/* vNUMA node memory ranges */
struct xen_vmemrange {
    uint64_t start, end;
    unsigned int flags;
    unsigned int nid;
};
typedef struct xen_vmemrange xen_vmemrange_t;
DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);

/*
 * vNUMA topology specifies vNUMA node number, distance table,
 * memory ranges and vcpu mapping provided for guests.
 * The XENMEM_get_vnumainfo hypercall expects the guest to supply
 * nr_vnodes, nr_vmemranges and nr_vcpus to indicate available buffer space.
 * After filling the guest's structures, nr_vnodes, nr_vmemranges and
 * nr_vcpus are copied back to the guest. If the supplied values were
 * incorrect, the hypervisor returns the expected values of nr_vnodes,
 * nr_vmemranges and nr_vcpus to the guest.
 */
struct xen_vnuma_topology_info {
    /* IN */
    domid_t domid;
    uint32_t pad;
    /* IN/OUT */
    unsigned int nr_vnodes;
    unsigned int nr_vcpus;
    unsigned int nr_vmemranges;
    /* OUT */
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vdistance;
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vcpu_to_vnode;
    union {
        XEN_GUEST_HANDLE(xen_vmemrange_t) h;
        uint64_t pad;
    } vmemrange;
};
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);

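/*
 * Illustrative sketch (not part of the Xen ABI): the usual pattern is to
 * issue XENMEM_get_vnumainfo with the guest's current buffer sizes; if they
 * are too small, the hypervisor reports the required values so the guest
 * can reallocate and retry.  Buffer names and helpers are assumptions.
 *
 *     struct xen_vnuma_topology_info numa = {
 *         .domid         = DOMID_SELF,
 *         .nr_vnodes     = nr_vnodes,
 *         .nr_vcpus      = nr_vcpus,
 *         .nr_vmemranges = nr_vmemranges,
 *     };
 *     set_xen_guest_handle(numa.vdistance.h, distance_buf);
 *     set_xen_guest_handle(numa.vcpu_to_vnode.h, vcpu_to_vnode_buf);
 *     set_xen_guest_handle(numa.vmemrange.h, vmemrange_buf);
 *     rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &numa);
 */
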
/* Next available subop number is 29 */

#endif /* __XEN_PUBLIC_MEMORY_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */