iPXE
hvm_op.h
/*
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__

FILE_LICENCE ( MIT );

#include "../xen.h"
#include "../trace.h"
#include "../event_channel.h"

/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param           0
#define HVMOP_get_param           1
struct xen_hvm_param {
    domid_t  domid;    /* IN */
    uint32_t index;    /* IN */
    uint64_t value;    /* IN/OUT */
};
typedef struct xen_hvm_param xen_hvm_param_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);

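/*
 * A minimal usage sketch (illustrative, not part of the Xen interface):
 * reading an HVM parameter via HVMOP_get_param.  Parameter indices are
 * defined in hvm/params.h; hvm_op_hypercall() stands in for whatever
 * mechanism the guest uses to issue __HYPERVISOR_hvm_op (see ../xen.h) with
 * a command and an argument pointer, and is an assumption of this sketch.
 * The later sketches in this file reuse the same assumed declaration.
 */
extern long hvm_op_hypercall ( unsigned int cmd, void *arg );

static inline long example_get_hvm_param ( uint32_t index, uint64_t *value ) {
    struct xen_hvm_param param;
    long rc;

    param.domid = DOMID_SELF;   /* query the calling domain (see ../xen.h) */
    param.index = index;        /* e.g. an HVM_PARAM_* index from params.h */
    rc = hvm_op_hypercall ( HVMOP_get_param, &param );
    if ( rc == 0 )
        *value = param.value;   /* IN/OUT field holds the result on success */
    return rc;
}
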
/* Set the logical level of one of a domain's PCI INTx wires. */
#define HVMOP_set_pci_intx_level  2
struct xen_hvm_set_pci_intx_level {
    /* Domain to be updated. */
    domid_t  domid;
    /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
    uint8_t  domain, bus, device, intx;
    /* Assertion level (0 = unasserted, 1 = asserted). */
    uint8_t  level;
};
typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);

/* Set the logical level of one of a domain's ISA IRQ wires. */
#define HVMOP_set_isa_irq_level   3
struct xen_hvm_set_isa_irq_level {
    /* Domain to be updated. */
    domid_t  domid;
    /* ISA device identification, by ISA IRQ (0-15). */
    uint8_t  isa_irq;
    /* Assertion level (0 = unasserted, 1 = asserted). */
    uint8_t  level;
};
typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);

#define HVMOP_set_pci_link_route  4
struct xen_hvm_set_pci_link_route {
    /* Domain to be updated. */
    domid_t  domid;
    /* PCI link identifier (0-3). */
    uint8_t  link;
    /* ISA IRQ (1-15), or 0 (disable link). */
    uint8_t  isa_irq;
};
typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);

/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs          5

typedef enum {
    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
    HVMMEM_mmio_dm,            /* Reads and writes go to the device model */
} hvmmem_type_t;

/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

/* Track dirty VRAM. */
#define HVMOP_track_dirty_vram    6
struct xen_hvm_track_dirty_vram {
    /* Domain to be tracked. */
    domid_t  domid;
    /* Number of pages to track. */
    uint32_t nr;
    /* First pfn to track. */
    uint64_aligned_t first_pfn;
    /* OUT variable. */
    /* Dirty bitmap buffer. */
    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
};
typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
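
/*
 * A minimal toolstack-side sketch (illustrative only): tracking the first
 * <nr> pages of a guest's VRAM, reusing the assumed hvm_op_hypercall()
 * declared above.  The dirty bitmap carries one bit per tracked page, so
 * the assumed dirty_vram_bitmap buffer must be at least nr/8 bytes long;
 * set_xen_guest_handle() is the guest-handle helper from xen.h.
 */
extern uint8_t dirty_vram_bitmap[];   /* assumed buffer, >= nr/8 bytes */

static inline long example_track_dirty_vram ( domid_t domid,
                                               uint64_t first_pfn,
                                               uint32_t nr ) {
    struct xen_hvm_track_dirty_vram track;

    track.domid = domid;
    track.first_pfn = first_pfn;
    track.nr = nr;
    set_xen_guest_handle ( track.dirty_bitmap, dirty_vram_bitmap );
    return hvm_op_hypercall ( HVMOP_track_dirty_vram, &track );
}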

/* Notify that some pages got modified by the Device Model. */
#define HVMOP_modified_memory     7
struct xen_hvm_modified_memory {
    /* Domain to be updated. */
    domid_t  domid;
    /* Number of pages. */
    uint32_t nr;
    /* First pfn. */
    uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);

#define HVMOP_set_mem_type        8
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
    /* Domain to be updated. */
    domid_t  domid;
    /* Memory type */
    uint16_t hvmmem_type;
    /* Number of pages. */
    uint32_t nr;
    /* First pfn. */
    uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
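
/*
 * A minimal toolstack-side sketch (illustrative only): marking a range of
 * guest pages read-only by setting their type to HVMMEM_ram_ro, reusing the
 * assumed hvm_op_hypercall() declared above.
 */
static inline long example_make_range_readonly ( domid_t domid,
                                                  uint64_t first_pfn,
                                                  uint32_t nr ) {
    struct xen_hvm_set_mem_type set;

    set.domid = domid;
    set.hvmmem_type = HVMMEM_ram_ro; /* subsequent writes are discarded */
    set.first_pfn = first_pfn;
    set.nr = nr;
    return hvm_op_hypercall ( HVMOP_set_mem_type, &set );
}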

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

/* Hint from PV drivers for pagetable destruction. */
#define HVMOP_pagetable_dying        9
struct xen_hvm_pagetable_dying {
    /* Domain with a pagetable about to be destroyed. */
    domid_t  domid;
    uint16_t pad[3]; /* align next field on 8-byte boundary */
    /* guest physical address of the toplevel pagetable dying */
    uint64_aligned_t gpa;
};
typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);

/* Get the current Xen time, in nanoseconds since system boot. */
#define HVMOP_get_time              10
struct xen_hvm_get_time {
    uint64_t now;      /* OUT */
};
typedef struct xen_hvm_get_time xen_hvm_get_time_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);

#define HVMOP_xentrace              11
struct xen_hvm_xentrace {
    uint16_t event, extra_bytes;
    uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
};
typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);

/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

/* Deprecated by XENMEM_access_op_set_access */
#define HVMOP_set_mem_access        12

/* Deprecated by XENMEM_access_op_get_access */
#define HVMOP_get_mem_access        13

#define HVMOP_inject_trap            14
/* Inject a trap into a VCPU, which will be taken the next time that VCPU is
 * scheduled. Note that the caller should know enough about the state of the
 * CPU before injecting the trap to know what its effect will be.
 */
struct xen_hvm_inject_trap {
    /* Domain to be queried. */
    domid_t domid;
    /* VCPU */
    uint32_t vcpuid;
    /* Vector number */
    uint32_t vector;
    /* Trap type (HVMOP_TRAP_*) */
    uint32_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define HVMOP_TRAP_ext_int    0 /* external interrupt */
# define HVMOP_TRAP_nmi        2 /* nmi */
# define HVMOP_TRAP_hw_exc     3 /* hardware exception */
# define HVMOP_TRAP_sw_int     4 /* software interrupt (CD nn) */
# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
# define HVMOP_TRAP_sw_exc     6 /* INT3 (CC), INTO (CE) */
    /* Error code, or ~0u to skip */
    uint32_t error_code;
    /* Instruction length */
    uint32_t insn_len;
    /* CR2 for page faults */
    uint64_aligned_t cr2;
};
typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
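
/*
 * A minimal toolstack-side sketch (illustrative only): injecting a page
 * fault (vector 14, a hardware exception) into a VCPU, reusing the assumed
 * hvm_op_hypercall() declared above.  Field values follow the comments on
 * struct xen_hvm_inject_trap; the vector-14/error-code/CR2 semantics are
 * standard x86, not something this header defines.
 */
static inline long example_inject_page_fault ( domid_t domid, uint32_t vcpuid,
                                                uint64_t fault_addr,
                                                uint32_t error_code ) {
    struct xen_hvm_inject_trap trap;

    trap.domid = domid;
    trap.vcpuid = vcpuid;
    trap.vector = 14;                  /* #PF */
    trap.type = HVMOP_TRAP_hw_exc;     /* hardware exception */
    trap.error_code = error_code;      /* or ~0u to skip */
    trap.insn_len = 0;                 /* not used for a hardware exception */
    trap.cr2 = fault_addr;             /* faulting address reported in CR2 */
    return hvm_op_hypercall ( HVMOP_inject_trap, &trap );
}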

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

#define HVMOP_get_mem_type    15
/* Return hvmmem_type_t for the specified pfn. */
struct xen_hvm_get_mem_type {
    /* Domain to be queried. */
    domid_t domid;
    /* OUT variable. */
    uint16_t mem_type;
    uint16_t pad[2]; /* align next field on 8-byte boundary */
    /* IN variable. */
    uint64_t pfn;
};
typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);

/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

/* MSI injection for emulated devices */
#define HVMOP_inject_msi         16
struct xen_hvm_inject_msi {
    /* Domain to be injected */
    domid_t   domid;
    /* Data -- lower 32 bits */
    uint32_t  data;
    /* Address (0xfeexxxxx) */
    uint64_t  addr;
};
typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);

/*
 * IOREQ Servers
 *
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
 * parameter...
 *
 * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
 * ioreq structures), or...
 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
 * ioreq ring), or...
 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
 * to request buffered I/O emulation).
 *
 * The following hypercalls facilitate the creation of IOREQ Servers for
 * 'secondary' emulators which are invoked to implement port I/O, memory, or
 * PCI config space ranges which they explicitly register.
 */

typedef uint16_t ioservid_t;

/*
 * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
 * emulator servicing domain <domid>.
 *
 * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
 * the buffered ioreq ring will not be allocated and hence all emulation
 * requests to this server will be synchronous.
 */
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
    domid_t domid;           /* IN - domain to be serviced */
    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
    ioservid_t id;           /* OUT - server id */
};
typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);

/*
 * HVMOP_get_ioreq_server_info: Get all the information necessary to access
 * IOREQ Server <id>.
 *
 * The emulator needs to map the synchronous ioreq structures and buffered
 * ioreq ring (if it exists) that Xen uses to request emulation. These are
 * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
 * respectively. In addition, if the IOREQ Server is handling buffered
 * emulation requests, the emulator needs to bind to event channel
 * <bufioreq_port> to listen for them. (The event channels used for
 * synchronous emulation requests are specified in the per-CPU ioreq
 * structures in <ioreq_pfn>).
 * If the IOREQ Server is not handling buffered emulation requests then the
 * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
 */
#define HVMOP_get_ioreq_server_info 18
struct xen_hvm_get_ioreq_server_info {
    domid_t domid;                 /* IN - domain to be serviced */
    ioservid_t id;                 /* IN - server id */
    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
};
typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);

/*
 * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
 *                                   for emulation by the client of IOREQ
 *                                   Server <id>
 * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
 *                                       for emulation by the client of IOREQ
 *                                       Server <id>
 *
 * There are three types of I/O that can be emulated: port I/O, memory accesses
 * and PCI config space accesses. The <type> field denotes which type of range
 * the <start> and <end> (inclusive) fields are specifying.
 * PCI config space ranges are specified by segment/bus/device/function values
 * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
 *
 * NOTE: unless an emulation request falls entirely within a range mapped
 * by a secondary emulator, it will not be passed to that emulator.
 */
#define HVMOP_map_io_range_to_ioreq_server 19
#define HVMOP_unmap_io_range_from_ioreq_server 20
struct xen_hvm_io_range {
    domid_t domid;               /* IN - domain to be serviced */
    ioservid_t id;               /* IN - server id */
    uint32_t type;               /* IN - type of range */
# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
};
typedef struct xen_hvm_io_range xen_hvm_io_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);

#define HVMOP_PCI_SBDF(s,b,d,f) \
    ((((s) & 0xffff) << 16) |   \
     (((b) & 0xff) << 8) |      \
     (((d) & 0x1f) << 3) |      \
     ((f) & 0x07))
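
/*
 * A minimal secondary-emulator sketch (illustrative only): claiming the PCI
 * config space of a single device function for IOREQ Server <id>, reusing
 * the assumed hvm_op_hypercall() declared above.  For a single function the
 * inclusive range starts and ends at the same HVMOP_PCI_SBDF encoding.
 */
static inline long example_claim_pci_function ( domid_t domid, ioservid_t id,
                                                 unsigned int seg,
                                                 unsigned int bus,
                                                 unsigned int dev,
                                                 unsigned int fn ) {
    struct xen_hvm_io_range range;

    range.domid = domid;
    range.id = id;
    range.type = HVMOP_IO_RANGE_PCI;
    range.start = range.end = HVMOP_PCI_SBDF ( seg, bus, dev, fn );
    return hvm_op_hypercall ( HVMOP_map_io_range_to_ioreq_server, &range );
}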

/*
 * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
 *                             <domid>.
 *
 * Any registered I/O ranges will be automatically deregistered.
 */
#define HVMOP_destroy_ioreq_server 21
struct xen_hvm_destroy_ioreq_server {
    domid_t domid; /* IN - domain to be serviced */
    ioservid_t id; /* IN - server id */
};
typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);

/*
 * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
 *                               servicing domain <domid>.
 *
 * The IOREQ Server will not be passed any emulation requests until it is in
 * the enabled state.
 * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
 * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
 * the enabled state.
 */
#define HVMOP_set_ioreq_server_state 22
struct xen_hvm_set_ioreq_server_state {
    domid_t domid;   /* IN - domain to be serviced */
    ioservid_t id;   /* IN - server id */
    uint8_t enabled; /* IN - enabled? */
};
typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
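
/*
 * A minimal secondary-emulator bring-up sketch (illustrative only), reusing
 * the assumed hvm_op_hypercall() declared above: create an IOREQ Server,
 * fetch the gmfns and event channel needed to service it, then enable it.
 * Actually mapping <ioreq_pfn>/<bufioreq_pfn> and binding <bufioreq_port>
 * use interfaces outside this header and are left out.
 */
static inline long example_ioreq_server_setup ( domid_t domid,
                                                 ioservid_t *id_out ) {
    struct xen_hvm_create_ioreq_server create;
    struct xen_hvm_get_ioreq_server_info info;
    struct xen_hvm_set_ioreq_server_state state;
    long rc;

    create.domid = domid;
    create.handle_bufioreq = 1;     /* also allocate the buffered ioreq ring */
    rc = hvm_op_hypercall ( HVMOP_create_ioreq_server, &create );
    if ( rc != 0 )
        return rc;
    *id_out = create.id;            /* OUT - server id */

    info.domid = domid;
    info.id = create.id;
    rc = hvm_op_hypercall ( HVMOP_get_ioreq_server_info, &info );
    if ( rc != 0 )
        return rc;
    /* info.ioreq_pfn, info.bufioreq_pfn and info.bufioreq_port would now be
     * mapped/bound by the emulator before enabling the server. */

    state.domid = domid;
    state.id = create.id;
    state.enabled = 1;
    return hvm_op_hypercall ( HVMOP_set_ioreq_server_state, &state );
}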

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */