/*
 * Copyright (c) 2016, Citrix Systems Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
#define __XEN_PUBLIC_HVM_DM_OP_H__

#include "../xen.h"

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#include "../event_channel.h"

#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif

/*
 * IOREQ Servers
 *
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
 * parameter...
 *
 * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
 * ioreq structures), or...
 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
 * ioreq ring), or...
 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
 * to request buffered I/O emulation).
 *
 * The following hypercalls facilitate the creation of IOREQ Servers for
 * 'secondary' emulators which are invoked to implement port I/O, memory, or
 * PCI config space ranges which they explicitly register.
 */

typedef uint16_t ioservid_t;

/*
 * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
 *                               secondary emulator.
 *
 * The <id> handed back is unique for the target domain. The value of
 * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
 * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
 * ioreq ring will not be allocated and hence all emulation requests to
 * this server will be synchronous.
 */
#define XEN_DMOP_create_ioreq_server 1

struct xen_dm_op_create_ioreq_server {
    /* IN - should server handle buffered ioreqs */
    uint8_t handle_bufioreq;
    uint8_t pad[3];
    /* OUT - server id */
    ioservid_t id;
};

/*
 * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
 *                                 access IOREQ Server <id>.
 *
 * If the IOREQ Server is handling buffered emulation requests, the
 * emulator needs to bind to event channel <bufioreq_port> to listen for
 * them. (The event channels used for synchronous emulation requests are
 * specified in the per-CPU ioreq structures).
 * In addition, if the XENMEM_acquire_resource memory op cannot be used,
 * the emulator will need to map the synchronous ioreq structures and
 * buffered ioreq ring (if it exists) from guest memory. If <flags> does
 * not contain XEN_DMOP_no_gfns then these pages will be made available and
 * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
 * respectively. (If the IOREQ Server is not handling buffered emulation,
 * only <ioreq_gfn> will be valid).
 *
 * NOTE: To access the synchronous ioreq structures and buffered ioreq
 *       ring, it is preferable to use the XENMEM_acquire_resource memory
 *       op specifying resource type XENMEM_resource_ioreq_server.
 */
#define XEN_DMOP_get_ioreq_server_info 2

struct xen_dm_op_get_ioreq_server_info {
    /* IN - server id */
    ioservid_t id;
    /* IN - flags */
    uint16_t flags;

#define _XEN_DMOP_no_gfns 0
#define XEN_DMOP_no_gfns (1u << _XEN_DMOP_no_gfns)

    /* OUT - buffered ioreq port */
    evtchn_port_t bufioreq_port;
    /* OUT - sync ioreq gfn (see block comment above) */
    uint64_aligned_t ioreq_gfn;
    /* OUT - buffered ioreq gfn (see block comment above)*/
    uint64_aligned_t bufioreq_gfn;
};
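
/*
 * Illustrative sketch (not part of this interface): querying IOREQ
 * Server <svr> when the emulator intends to map the ioreq pages via
 * XENMEM_acquire_resource, so the gfns are not wanted.  <svr> is an
 * assumed value obtained from XEN_DMOP_create_ioreq_server above; how
 * the op reaches Xen is described with HYPERVISOR_dm_op at the end of
 * this header.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_get_ioreq_server_info,
 *         .u.get_ioreq_server_info = {
 *             .id    = svr,
 *             .flags = XEN_DMOP_no_gfns,
 *         },
 *     };
 *
 * On success, op.u.get_ioreq_server_info.bufioreq_port holds the event
 * channel to bind to for buffered requests (if any).
 */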

/*
 * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
 *                                        emulation by the client of
 *                                        IOREQ Server <id>.
 * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
 *                                            previously registered for
 *                                            emulation by the client of
 *                                            IOREQ Server <id>.
 *
 * There are three types of I/O that can be emulated: port I/O, memory
 * accesses and PCI config space accesses. The <type> field denotes which
 * type of range the <start> and <end> (inclusive) fields are specifying.
 * PCI config space ranges are specified by segment/bus/device/function
 * values which should be encoded using the XEN_DMOP_PCI_SBDF helper macro
 * below.
 *
 * NOTE: unless an emulation request falls entirely within a range mapped
 * by a secondary emulator, it will not be passed to that emulator.
 */
#define XEN_DMOP_map_io_range_to_ioreq_server 3
#define XEN_DMOP_unmap_io_range_from_ioreq_server 4

struct xen_dm_op_ioreq_server_range {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;
    /* IN - type of range */
    uint32_t type;
# define XEN_DMOP_IO_RANGE_PORT   0 /* I/O port range */
# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define XEN_DMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
    /* IN - inclusive start and end of range */
    uint64_aligned_t start, end;
};

#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
	((((s) & 0xffff) << 16) |  \
	 (((b) & 0xff) << 8) |     \
	 (((d) & 0x1f) << 3) |     \
	 ((f) & 0x07))
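
/*
 * Illustrative sketch (not part of this interface): claiming the PCI
 * config space of an emulated device at 0000:00:02.0 for IOREQ Server
 * <svr> (an assumed value from XEN_DMOP_create_ioreq_server).  A single
 * SBDF encoding is used for both ends of the inclusive range.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_map_io_range_to_ioreq_server,
 *         .u.map_io_range_to_ioreq_server = {
 *             .id    = svr,
 *             .type  = XEN_DMOP_IO_RANGE_PCI,
 *             .start = XEN_DMOP_PCI_SBDF(0, 0, 2, 0),
 *             .end   = XEN_DMOP_PCI_SBDF(0, 0, 2, 0),
 *         },
 *     };
 */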

/*
 * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
 *
 * The IOREQ Server will not be passed any emulation requests until it is
 * in the enabled state.
 * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
 * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
 * is in the enabled state.
 */
#define XEN_DMOP_set_ioreq_server_state 5

struct xen_dm_op_set_ioreq_server_state {
    /* IN - server id */
    ioservid_t id;
    /* IN - enabled? */
    uint8_t enabled;
    uint8_t pad;
};

/*
 * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
 *
 * Any registered I/O ranges will be automatically deregistered.
 */
#define XEN_DMOP_destroy_ioreq_server 6

struct xen_dm_op_destroy_ioreq_server {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;
};

/*
 * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
 *                            range.
 *
 * NOTE: The dirty bitmap is passed back to the caller in a
 *       secondary buffer.
 */
#define XEN_DMOP_track_dirty_vram 7

struct xen_dm_op_track_dirty_vram {
    /* IN - number of pages to be tracked */
    uint32_t nr;
    uint32_t pad;
    /* IN - first pfn to track */
    uint64_aligned_t first_pfn;
};
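
/*
 * Illustrative sketch (not part of this interface): tracking a 16-page
 * framebuffer starting at pfn 0xf0000.  The bitmap is assumed here to
 * carry one dirty bit per tracked page, and is returned through the
 * secondary buffer (bufs[1]) of the hypercall described at the end of
 * this header.
 *
 *     uint8_t bitmap[(16 + 7) / 8];
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_track_dirty_vram,
 *         .u.track_dirty_vram = {
 *             .nr        = 16,
 *             .first_pfn = 0xf0000,
 *         },
 *     };
 *
 * bufs[1].h would then point at bitmap, with bufs[1].size set to
 * sizeof(bitmap).
 */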

/*
 * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
 *                              PCI INTx pins.
 */
#define XEN_DMOP_set_pci_intx_level 8

struct xen_dm_op_set_pci_intx_level {
    /* IN - PCI INTx identification (domain:bus:device:intx) */
    uint16_t domain;
    uint8_t bus, device, intx;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t  level;
};

/*
 * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
 *                             ISA IRQ lines.
 */
#define XEN_DMOP_set_isa_irq_level 9

struct xen_dm_op_set_isa_irq_level {
    /* IN - ISA IRQ (0-15) */
    uint8_t  isa_irq;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t  level;
};

/*
 * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
 */
#define XEN_DMOP_set_pci_link_route 10

struct xen_dm_op_set_pci_link_route {
    /* PCI INTx line (0-3) */
    uint8_t  link;
    /* ISA IRQ (1-15) or 0 -> disable link */
    uint8_t  isa_irq;
};

/*
 * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
 *                           an emulator.
 *
 * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
 * @nr_extents entries.
 *
 * On error, @nr_extents will contain the index+1 of the extent where the
 * error occurred. In that event it is not defined whether, or which, pages
 * have been marked as dirty.
 */
#define XEN_DMOP_modified_memory 11

struct xen_dm_op_modified_memory {
    /*
     * IN - Number of extents to be processed
     * OUT - returns n+1 for failing extent
     */
    uint32_t nr_extents;
    /* IN/OUT - Must be set to 0 */
    uint32_t opaque;
};

struct xen_dm_op_modified_memory_extent {
    /* IN - number of contiguous pages modified */
    uint32_t nr;
    uint32_t pad;
    /* IN - first pfn modified */
    uint64_aligned_t first_pfn;
};
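
/*
 * Illustrative sketch (not part of this interface): reporting two
 * disjoint page ranges written by the emulator.  The extents live in
 * the secondary buffer (DMOP buf 1) and nr_extents names how many
 * entries that buffer holds; the pfn values here are arbitrary.
 *
 *     struct xen_dm_op_modified_memory_extent ext[2] = {
 *         { .nr = 1,  .first_pfn = 0x1000 },
 *         { .nr = 64, .first_pfn = 0x2000 },
 *     };
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_modified_memory,
 *         .u.modified_memory = {
 *             .nr_extents = 2,
 *             .opaque     = 0,
 *         },
 *     };
 *
 * bufs[1].h would then point at ext, with bufs[1].size set to
 * sizeof(ext).
 */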

/*
 * XEN_DMOP_set_mem_type: Notify that a region of memory is to be treated
 *                        in a specific way. (See definition of
 *                        hvmmem_type_t).
 *
 * NOTE: In the event of a continuation (return code -ERESTART), the
 *       @first_pfn is set to the value of the pfn of the remaining
 *       region and @nr reduced to the size of the remaining region.
 */
#define XEN_DMOP_set_mem_type 12

struct xen_dm_op_set_mem_type {
    /* IN - number of contiguous pages */
    uint32_t nr;
    /* IN - new hvmmem_type_t of region */
    uint16_t mem_type;
    uint16_t pad;
    /* IN - first pfn in region */
    uint64_aligned_t first_pfn;
};

/*
 * XEN_DMOP_inject_event: Inject an event into a VCPU, which will
 *                        get taken up when it is next scheduled.
 *
 * Note that the caller should know enough of the state of the CPU before
 * injecting, to know what the effect of injecting the event will be.
 */
#define XEN_DMOP_inject_event 13

struct xen_dm_op_inject_event {
    /* IN - index of vCPU */
    uint32_t vcpuid;
    /* IN - interrupt vector */
    uint8_t vector;
    /* IN - event type (DMOP_EVENT_* ) */
    uint8_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define XEN_DMOP_EVENT_ext_int    0 /* external interrupt */
# define XEN_DMOP_EVENT_nmi        2 /* nmi */
# define XEN_DMOP_EVENT_hw_exc     3 /* hardware exception */
# define XEN_DMOP_EVENT_sw_int     4 /* software interrupt (CD nn) */
# define XEN_DMOP_EVENT_pri_sw_exc 5 /* ICEBP (F1) */
# define XEN_DMOP_EVENT_sw_exc     6 /* INT3 (CC), INTO (CE) */
    /* IN - instruction length */
    uint8_t insn_len;
    uint8_t pad0;
    /* IN - error code (or ~0 to skip) */
    uint32_t error_code;
    uint32_t pad1;
    /* IN - CR2 for page faults */
    uint64_aligned_t cr2;
};
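
/*
 * Illustrative sketch (not part of this interface): injecting a page
 * fault (hardware exception, vector 14) with error code 0x2 (a write
 * fault) and faulting address 0x1000 into vCPU 0.  The vector and
 * error-code values follow the usual x86 architectural meaning and are
 * only examples.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_inject_event,
 *         .u.inject_event = {
 *             .vcpuid     = 0,
 *             .vector     = 14,
 *             .type       = XEN_DMOP_EVENT_hw_exc,
 *             .insn_len   = 0,
 *             .error_code = 0x2,
 *             .cr2        = 0x1000,
 *         },
 *     };
 */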

/*
 * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
 */
#define XEN_DMOP_inject_msi 14

struct xen_dm_op_inject_msi {
    /* IN - MSI data (lower 32 bits) */
    uint32_t data;
    uint32_t pad;
    /* IN - MSI address (0xfeexxxxx) */
    uint64_aligned_t addr;
};

/*
 * XEN_DMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
 *                                      to specific memory type <type>
 *                                      for specific accesses <flags>
 *
 * For now, flags only accepts the value XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
 * which means only write operations are forwarded to the ioreq server.
 * Support for the emulation of read operations can be added when an ioreq
 * server has such a requirement in the future.
 */
#define XEN_DMOP_map_mem_type_to_ioreq_server 15

struct xen_dm_op_map_mem_type_to_ioreq_server {
    ioservid_t id;      /* IN - ioreq server id */
    uint16_t type;      /* IN - memory type */
    uint32_t flags;     /* IN - types of accesses to be forwarded to the
                           ioreq server. flags with 0 means to unmap the
                           ioreq server */

#define XEN_DMOP_IOREQ_MEM_ACCESS_READ (1u << 0)
#define XEN_DMOP_IOREQ_MEM_ACCESS_WRITE (1u << 1)

    uint64_t opaque;    /* IN/OUT - only used for hypercall continuation,
                           has to be set to zero by the caller */
};
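
/*
 * Illustrative sketch (not part of this interface): forwarding write
 * accesses for a memory type to IOREQ Server <svr>, then undoing the
 * mapping by passing flags == 0.  <svr> and <mem_type> are assumed
 * values; the valid memory types are those of hvmmem_type_t (see
 * hvm_op.h).
 *
 *     struct xen_dm_op map = {
 *         .op = XEN_DMOP_map_mem_type_to_ioreq_server,
 *         .u.map_mem_type_to_ioreq_server = {
 *             .id    = svr,
 *             .type  = mem_type,
 *             .flags = XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
 *         },
 *     };
 *
 *     struct xen_dm_op unmap = map;
 *     unmap.u.map_mem_type_to_ioreq_server.flags = 0;
 */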

/*
 * XEN_DMOP_remote_shutdown : Declare a shutdown for another domain
 *                            Identical to SCHEDOP_remote_shutdown
 */
#define XEN_DMOP_remote_shutdown 16

struct xen_dm_op_remote_shutdown {
    uint32_t reason;       /* SHUTDOWN_* => enum sched_shutdown_reason */
                           /* (Other reason values are not blocked) */
};

/*
 * XEN_DMOP_relocate_memory : Relocate GFNs for the specified guest.
 *                            Identical to XENMEM_add_to_physmap with
 *                            space == XENMAPSPACE_gmfn_range.
 */
#define XEN_DMOP_relocate_memory 17

struct xen_dm_op_relocate_memory {
    /* All fields are IN/OUT, with their OUT state undefined. */
    /* Number of GFNs to process. */
    uint32_t size;
    uint32_t pad;
    /* Starting GFN to relocate. */
    uint64_aligned_t src_gfn;
    /* Starting GFN where GFNs should be relocated. */
    uint64_aligned_t dst_gfn;
};

/*
 * XEN_DMOP_pin_memory_cacheattr : Pin caching type of RAM space.
 *                                 Identical to XEN_DOMCTL_pin_mem_cacheattr.
 */
#define XEN_DMOP_pin_memory_cacheattr 18

struct xen_dm_op_pin_memory_cacheattr {
    uint64_aligned_t start; /* Start gfn. */
    uint64_aligned_t end;   /* End gfn. */
/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
#define XEN_DMOP_MEM_CACHEATTR_UC  0
#define XEN_DMOP_MEM_CACHEATTR_WC  1
#define XEN_DMOP_MEM_CACHEATTR_WT  4
#define XEN_DMOP_MEM_CACHEATTR_WP  5
#define XEN_DMOP_MEM_CACHEATTR_WB  6
#define XEN_DMOP_MEM_CACHEATTR_UCM 7
#define XEN_DMOP_DELETE_MEM_CACHEATTR (~(uint32_t)0)
    uint32_t type;          /* XEN_DMOP_MEM_CACHEATTR_* */
    uint32_t pad;
};

struct xen_dm_op {
    uint32_t op;
    uint32_t pad;
    union {
        struct xen_dm_op_create_ioreq_server create_ioreq_server;
        struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
        struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
        struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
        struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
        struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
        struct xen_dm_op_track_dirty_vram track_dirty_vram;
        struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
        struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
        struct xen_dm_op_set_pci_link_route set_pci_link_route;
        struct xen_dm_op_modified_memory modified_memory;
        struct xen_dm_op_set_mem_type set_mem_type;
        struct xen_dm_op_inject_event inject_event;
        struct xen_dm_op_inject_msi inject_msi;
        struct xen_dm_op_map_mem_type_to_ioreq_server
                map_mem_type_to_ioreq_server;
        struct xen_dm_op_remote_shutdown remote_shutdown;
        struct xen_dm_op_relocate_memory relocate_memory;
        struct xen_dm_op_pin_memory_cacheattr pin_memory_cacheattr;
    } u;
};

#endif /* __XEN__ || __XEN_TOOLS__ */

struct xen_dm_op_buf {
    XEN_GUEST_HANDLE(void) h;
    xen_ulong_t size;
};
typedef struct xen_dm_op_buf xen_dm_op_buf_t;
DEFINE_XEN_GUEST_HANDLE(xen_dm_op_buf_t);

/* ` enum neg_errnoval
 * ` HYPERVISOR_dm_op(domid_t domid,
 * `                  unsigned int nr_bufs,
 * `                  xen_dm_op_buf_t bufs[])
 * ` [see __HYPERVISOR_dm_op]
 * `
 *
 * @domid is the domain the hypercall operates on.
 * @nr_bufs is the number of buffers in the @bufs array.
 * @bufs points to an array of buffers where @bufs[0] contains a struct
 * xen_dm_op, describing the specific device model operation and its
 * parameters.
 * @bufs[1..] may be referenced in the parameters for the purposes of
 * passing extra information to or from the domain.
 */
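
/*
 * Illustrative sketch (not part of this interface): creating an IOREQ
 * Server for an assumed <domid> using the calling convention above.
 * bufs[0] carries the struct xen_dm_op; this particular op needs no
 * further buffers.  set_xen_guest_handle() comes from xen.h and
 * HVM_IOREQSRV_BUFIOREQ_OFF from hvm_op.h; in practice an emulator
 * would normally issue this through a privileged toolstack library
 * rather than calling the hypercall directly.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_create_ioreq_server,
 *         .u.create_ioreq_server = {
 *             .handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_OFF,
 *         },
 *     };
 *     struct xen_dm_op_buf buf;
 *
 *     set_xen_guest_handle(buf.h, &op);
 *     buf.size = sizeof(op);
 *
 *     int rc = HYPERVISOR_dm_op(domid, 1, &buf);
 *
 * On success (rc == 0) the new server id is returned in
 * op.u.create_ioreq_server.id.
 */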

#endif /* __XEN_PUBLIC_HVM_DM_OP_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */