xc_domain_save.c

This post describes how to modify the 'xc_domain_save.c' file of Xen 3.4 to measure the memory dirty-page rate. By adding a test section to the code, the author collects and records memory usage at runtime, focusing on which pages are modified frequently. The test results are normally written to /var/log/xen/xend.log for later analysis and optimization. The sample file TPCC.txt provides a concrete measurement example.

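Before the full file, here is a minimal, standalone sketch of the sampling idea behind the TEST_MEMORY_DIRTY block: clear the log-dirty bitmap, wait a fixed interval, peek at the bitmap again and count the pages that were written. It is only a sketch, assuming the Xen 3.4 libxc calls the file itself uses (xc_shadow_control with XEN_DOMCTL_SHADOW_OP_CLEAN/PEEK, declared via <xenctrl.h> in that tree) and that log-dirty mode has already been enabled with XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY; it omits the bitmap page-alignment and locking that the real code performs.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <xenctrl.h>

#define BPL (sizeof(unsigned long) * 8)

/* Count the set bits in a bitmap covering 'nbits' pages. */
static unsigned long count_set_bits(const unsigned long *bmap, unsigned long nbits)
{
    unsigned long i, n = 0;
    for ( i = 0; i < nbits; i++ )
        if ( bmap[i / BPL] & (1UL << (i % BPL)) )
            n++;
    return n;
}

/* Print one dirty-page sample per interval for domain 'dom'.
 * 'p2m_size' is the number of guest pages, as obtained in the save code. */
static int sample_dirty_pages(int xc_handle, uint32_t dom,
                              unsigned long p2m_size,
                              int samples, unsigned int interval_us)
{
    unsigned long *bitmap = calloc((p2m_size + BPL - 1) / BPL,
                                   sizeof(unsigned long));
    int i;

    if ( bitmap == NULL )
        return -1;

    for ( i = 0; i < samples; i++ )
    {
        /* Retrieve and reset the log-dirty bitmap... */
        xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_CLEAN,
                          bitmap, p2m_size, NULL, 0, NULL);
        usleep(interval_us);
        /* ...then read it back without clearing and count the dirty pages. */
        xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
                          bitmap, p2m_size, NULL, 0, NULL);
        printf("sample %d: %lu pages dirtied in %u us\n",
               i, count_set_bits(bitmap, p2m_size), interval_us);
    }

    free(bitmap);
    return 0;
}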

/******************************************************************************
 * xc_linux_save.c
 *
 * Save the state of a running Linux session.
 *
 * Copyright (c) 2003, K A Fraser.
 */

#include <inttypes.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>

#include "xc_private.h"
#include "xc_dom.h"
#include "xg_private.h"
#include "xg_save_restore.h"

#include <xen/hvm/params.h>
#include "xc_e820.h"

/*
** Default values for important tuning parameters. Can override by passing
** non-zero replacement values to xc_domain_save().
**
** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
**
*/
#define DEF_MAX_ITERS   29   /* limit us to 30 times round loop   */
#define DEF_MAX_FACTOR   3   /* never send more than 3x p2m_size  */
/* #define ADAPTIVE_SAVE */
#define TEST_WWS            /* measure the writable working set before saving */
#define TEST_MEMORY_DIRTY   /* sample the memory dirty-page rate before saving */

/* max mfn of the whole machine */
static unsigned long max_mfn;

/* virtual starting address of the hypervisor */
static unsigned long hvirt_start;

/* #levels of page tables used by the current guest */
static unsigned int pt_levels;

/* HVM: shared-memory bitmaps for getting log-dirty bits from qemu-dm */
static unsigned long *qemu_bitmaps[2];
static int qemu_active;
static int qemu_non_active;

/* number of pfns this guest has (i.e. number of entries in the P2M) */
static unsigned long p2m_size;

/* Live mapping of the table mapping each PFN to its current MFN. */
static xen_pfn_t *live_p2m = NULL;

/* Live mapping of system MFN to PFN table. */
static xen_pfn_t *live_m2p = NULL;
static unsigned long m2p_mfn0;

/* Address size of the guest */
unsigned int guest_width;

/* grep fodder: machine_to_phys */

#define mfn_to_pfn(_mfn)  (live_m2p[(_mfn)])

#define pfn_to_mfn(_pfn)                                            \
  ((xen_pfn_t) ((guest_width==8)                                    \
                ? (((uint64_t *)live_p2m)[(_pfn)])                  \
                : ((((uint32_t *)live_p2m)[(_pfn)]) == 0xffffffffU  \
                   ? (-1UL) : (((uint32_t *)live_p2m)[(_pfn)]))))

/*
 * Returns TRUE if the given machine frame number has a unique mapping
 * in the guest's pseudophysical map.
 */
#define MFN_IS_IN_PSEUDOPHYS_MAP(_mfn)          \
    (((_mfn) < (max_mfn)) &&                    \
     ((mfn_to_pfn(_mfn) < (p2m_size)) &&        \
      (pfn_to_mfn(mfn_to_pfn(_mfn)) == (_mfn))))

/*
** During (live) save/migrate, we maintain a number of bitmaps to track
** which pages we have to send, to fixup, and to skip.
*/

#define BITS_PER_LONG (sizeof(unsigned long) * 8)
#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
#define BITMAP_SIZE   (BITS_TO_LONGS(p2m_size) * sizeof(unsigned long))

#define BITMAP_ENTRY(_nr,_bmap) \
   ((volatile unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]

#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)

static inline int test_bit (int nr, volatile void * addr)
{
    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
}

static inline void clear_bit (int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
}

static inline void set_bit ( int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
}

/* Returns the hamming weight (i.e. the number of bits set) in a N-bit word */
static inline unsigned int hweight32(unsigned int w)
{
    unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
    res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
    res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
    res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
    return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
}

static inline int count_bits ( int nr, volatile void *addr)
{
    int i, count = 0;
    volatile unsigned long *p = (volatile unsigned long *)addr;
    /* We know that the array is padded to unsigned long. */
    for ( i = 0; i < (nr / (sizeof(unsigned long)*8)); i++, p++ )
        count += hweight32(*p);
    return count;
}

static uint64_t tv_to_us(struct timeval *new)
{
    return (new->tv_sec * 1000000) + new->tv_usec;
}

static uint64_t llgettimeofday(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return tv_to_us(&now);
}

static uint64_t tv_delta(struct timeval *new, struct timeval *old)
{
    return (((new->tv_sec - old->tv_sec)*1000000) +
            (new->tv_usec - old->tv_usec));
}

static int noncached_write(int fd, int live, void *buffer, int len) 
{
    static int write_count = 0;
    int rc = (write_exact(fd, buffer, len) == 0) ? len : -1;

    write_count += len;
    if ( write_count >= (MAX_PAGECACHE_USAGE * PAGE_SIZE) )
    {
        /* Time to discard cache - don't care if this fails */
        discard_file_cache(fd, 0 /* no flush */);
        write_count = 0;
    }

    return rc;
}

#ifdef ADAPTIVE_SAVE

/*
** We control the rate at which we transmit (or save) to minimize impact
** on running domains (including the target if we're doing live migrate).
*/

#define MAX_MBIT_RATE    800      /* maximum transmit rate for migrate */
#define START_MBIT_RATE  400      /* initial transmit rate for migrate */

/* Scaling factor to convert between a rate (in Mb/s) and time (in usecs) */
#define RATE_TO_BTU      781250

/* Amount in bytes we allow ourselves to send in a burst */
#define BURST_BUDGET (100*1024)

/* We keep track of the current and previous transmission rate */
static int mbit_rate, ombit_rate = 0;

/* Have we reached the maximum transmission rate? */
#define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE)

static inline void initialize_mbit_rate(void)
{
    mbit_rate = START_MBIT_RATE;
    /* mbit_rate = 500; */
}

static int ratewrite(int io_fd, int live, void *buf, int n)
{
    static int budget = 0;
    static int burst_time_us = -1;
    static struct timeval last_put = { 0 };
    struct timeval now;
    struct timespec delay;
    long long delta;

    if ( START_MBIT_RATE == 0 )
        return noncached_write(io_fd, live, buf, n);

    budget -= n;
    if ( budget < 0 )
    {
        if ( mbit_rate != ombit_rate )
        {
            burst_time_us = RATE_TO_BTU / mbit_rate;
            ombit_rate = mbit_rate;
            DPRINTF("rate limit: %d mbit/s burst budget %d slot time %d\n",
                    mbit_rate, BURST_BUDGET, burst_time_us);
        }
        if ( last_put.tv_sec == 0 )
        {
            budget += BURST_BUDGET;
            gettimeofday(&last_put, NULL);
        }
        else
        {
            while ( budget < 0 )
            {
                gettimeofday(&now, NULL);
                delta = tv_delta(&now, &last_put);
                while ( delta > burst_time_us )
                {
                    budget += BURST_BUDGET;
                    last_put.tv_usec += burst_time_us;
                    if ( last_put.tv_usec > 1000000 )
                    {
                        last_put.tv_usec -= 1000000;
                        last_put.tv_sec++;
                    }
                    delta -= burst_time_us;
                }
                if ( budget > 0 )
                    break;
                delay.tv_sec = 0;
                delay.tv_nsec = 1000 * (burst_time_us - delta);
                while ( delay.tv_nsec > 0 )
                    if ( nanosleep(&delay, &delay) == 0 )
                        break;
            }
        }
    }
    return noncached_write(io_fd, live, buf, n);
}

#else /* ! ADAPTIVE SAVE */

#define RATE_IS_MAX() (0)
#define ratewrite(_io_fd, _live, _buf, _n) noncached_write((_io_fd), (_live), (_buf), (_n))
#define initialize_mbit_rate()

#endif

static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
                       xc_shadow_op_stats_t *stats, int print)
{
    static struct timeval wall_last;
    static long long      d0_cpu_last;
    static long long      d1_cpu_last;

    struct timeval        wall_now;
    long long             wall_delta;
    long long             d0_cpu_now, d0_cpu_delta;
    long long             d1_cpu_now, d1_cpu_delta;

    gettimeofday(&wall_now, NULL);

    d0_cpu_now = xc_domain_get_cpu_usage(xc_handle, 0, /* FIXME */ 0)/1000;
    d1_cpu_now = xc_domain_get_cpu_usage(xc_handle, domid, /* FIXME */ 0)/1000;

    if ( (d0_cpu_now == -1) || (d1_cpu_now == -1) )
        DPRINTF("ARRHHH!!\n");

    wall_delta = tv_delta(&wall_now,&wall_last)/1000;
    if ( wall_delta == 0 )
        wall_delta = 1;

    d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
    d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;

    if ( print )
        DPRINTF("delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
                "dirtied %dMb/s %" PRId32 " pages\n",
                wall_delta,
                (int)((d0_cpu_delta*100)/wall_delta),
                (int)((d1_cpu_delta*100)/wall_delta),
                (int)((pages_sent*PAGE_SIZE)/(wall_delta*(1000/8))),
                (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))),
                stats->dirty_count);

#ifdef ADAPTIVE_SAVE
    if ( ((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate )
    {
        mbit_rate = (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8)))
                    + 200;
        /* mbit_rate = 900; */
        if ( mbit_rate > MAX_MBIT_RATE )
            mbit_rate = MAX_MBIT_RATE;
    }
#endif

    d0_cpu_last = d0_cpu_now;
    d1_cpu_last = d1_cpu_now;
    wall_last   = wall_now;

    return 0;
}


static int analysis_phase(int xc_handle, uint32_t domid, int p2m_size,
                          unsigned long *arr, int runs)
{
    long long start, now;
    xc_shadow_op_stats_t stats;
    int j;

    start = llgettimeofday();

    for ( j = 0; j < runs; j++ )
    {
        int i;

        xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
                          arr, p2m_size, NULL, 0, NULL);
        DPRINTF("#Flush\n");
        for ( i = 0; i < 40; i++ )
        {
            usleep(50000);
            now = llgettimeofday();
            xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
                              NULL, 0, NULL, 0, &stats);
            DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
                    ((now-start)+500)/1000,
                    stats.fault_count, stats.dirty_count);
        }
    }

    return -1;
}


static int suspend_and_state(int (*suspend)(void), int xc_handle, int io_fd,
                             int dom, xc_dominfo_t *info)
{
    if ( !(*suspend)() )
    {
        ERROR("Suspend request failed");
        return -1;
    }

    if ( (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) ||
         !info->shutdown || (info->shutdown_reason != SHUTDOWN_suspend) )
    {
        ERROR("Domain not in suspended state");
        return -1;
    }

    return 0;
}

/*
** Map the top-level page of MFNs from the guest. The guest might not have
** finished resuming from a previous restore operation, so we wait a while for
** it to update the MFN to a reasonable value.
*/
static void *map_frame_list_list(int xc_handle, uint32_t dom,
                                 shared_info_any_t *shinfo)
{
    int count = 100;
    void *p;
    uint64_t fll = GET_FIELD(shinfo, arch.pfn_to_mfn_frame_list_list);

    while ( count-- && (fll == 0) )
    {
        usleep(10000);
        fll = GET_FIELD(shinfo, arch.pfn_to_mfn_frame_list_list);
    }

    if ( fll == 0 )
    {
        ERROR("Timed out waiting for frame list updated.");
        return NULL;
    }

    p = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, fll);
    if ( p == NULL )
        ERROR("Couldn't map p2m_frame_list_list (errno %d)", errno);

    return p;
}

/*
** During transfer (or in the state file), all page-table pages must be
** converted into a 'canonical' form where references to actual mfns
** are replaced with references to the corresponding pfns.
**
** This function performs the appropriate conversion, taking into account
** which entries do not require canonicalization (in particular, those
** entries which map the virtual address reserved for the hypervisor).
*/
static int canonicalize_pagetable(unsigned long type, unsigned long pfn,
                           const void *spage, void *dpage)
{

    int i, pte_last, xen_start, xen_end, race = 0; 
    uint64_t pte;

    /*
    ** We need to determine which entries in this page table hold
    ** reserved hypervisor mappings. This depends on the current
    ** page table type as well as the number of paging levels.
    */
    xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2) ? 4 : 8);

    if ( (pt_levels == 2) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
        xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);

    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L3TAB) )
        xen_start = L3_PAGETABLE_ENTRIES_PAE;

    /*
    ** In PAE only the L2 mapping the top 1GB contains Xen mappings.
    ** We can spot this by looking for the guest's mapping of the m2p.
    ** Guests must ensure that this check will fail for other L2s.
    */
    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
    {
        int hstart;
        uint64_t he;

        hstart = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
        he = ((const uint64_t *) spage)[hstart];

        if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
        {
            /* hvirt starts with xen stuff... */
            xen_start = hstart;
        }
        else if ( hvirt_start != 0xf5800000 )
        {
            /* old L2s from before hole was shrunk... */
            hstart = (0xf5800000 >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
            he = ((const uint64_t *) spage)[hstart];
            if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86) == m2p_mfn0 )
                xen_start = hstart;
        }
    }

    if ( (pt_levels == 4) && (type == XEN_DOMCTL_PFINFO_L4TAB) )
    {
        /*
        ** XXX SMH: should compute these from hvirt_start (which we have)
        ** and hvirt_end (which we don't)
        */
        xen_start = 256;
        xen_end   = 272;
    }

    /* Now iterate through the page table, canonicalizing each PTE */
    for (i = 0; i < pte_last; i++ )
    {
        unsigned long pfn, mfn;

        if ( pt_levels == 2 )
            pte = ((const uint32_t*)spage)[i];
        else
            pte = ((const uint64_t*)spage)[i];

        if ( (i >= xen_start) && (i < xen_end) )
            pte = 0;

        if ( pte & _PAGE_PRESENT )
        {
            mfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;
            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
            {
                /* This will happen if the type info is stale which
                   is quite feasible under live migration */
                pfn  = 0;  /* zap it - we'll retransmit this page later */
                /* XXX: We can't spot Xen mappings in compat-mode L2es 
                 * from 64-bit tools, but the only thing in them is the
                 * compat m2p, so we quietly zap them.  This doesn't
                 * count as a race, so don't report it. */
                if ( !(type == XEN_DOMCTL_PFINFO_L2TAB 
                       && sizeof (unsigned long) > guest_width) )
                     race = 1;  /* inform the caller; fatal if !live */ 
            }
            else
                pfn = mfn_to_pfn(mfn);

            pte &= ~MADDR_MASK_X86;
            pte |= (uint64_t)pfn << PAGE_SHIFT;

            /*
             * PAE guest L3Es can contain these flags when running on
             * a 64bit hypervisor. We zap these here to avoid any
             * surprise at restore time...
             */
            if ( (pt_levels == 3) &&
                 (type == XEN_DOMCTL_PFINFO_L3TAB) &&
                 (pte & (_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED)) )
                pte &= ~(_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);
        }

        if ( pt_levels == 2 )
            ((uint32_t*)dpage)[i] = pte;
        else
            ((uint64_t*)dpage)[i] = pte;
    }

    return race;
}

static xen_pfn_t *xc_map_m2p(int xc_handle,
                                 unsigned long max_mfn,
                                 int prot)
{
    struct xen_machphys_mfn_list xmml;
    privcmd_mmap_entry_t *entries;
    unsigned long m2p_chunks, m2p_size;
    xen_pfn_t *m2p;
    xen_pfn_t *extent_start;
    int i;

    m2p = NULL;
    m2p_size   = M2P_SIZE(max_mfn);
    m2p_chunks = M2P_CHUNKS(max_mfn);

    xmml.max_extents = m2p_chunks;

    extent_start = calloc(m2p_chunks, sizeof(xen_pfn_t));
    if ( !extent_start )
    {
        ERROR("failed to allocate space for m2p mfns");
        goto err0;
    }
    set_xen_guest_handle(xmml.extent_start, extent_start);

    if ( xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
         (xmml.nr_extents != m2p_chunks) )
    {
        ERROR("xc_get_m2p_mfns");
        goto err1;
    }

    entries = calloc(m2p_chunks, sizeof(privcmd_mmap_entry_t));
    if (entries == NULL)
    {
        ERROR("failed to allocate space for mmap entries");
        goto err1;
    }

    for ( i = 0; i < m2p_chunks; i++ )
        entries[i].mfn = extent_start[i];

    m2p = xc_map_foreign_ranges(xc_handle, DOMID_XEN,
			m2p_size, prot, M2P_CHUNK_SIZE,
			entries, m2p_chunks);
    if (m2p == NULL)
    {
        ERROR("xc_mmap_foreign_ranges failed");
        goto err2;
    }

    m2p_mfn0 = entries[0].mfn;

err2:
    free(entries);
err1:
    free(extent_start);

err0:
    return m2p;
}


static xen_pfn_t *map_and_save_p2m_table(int xc_handle, 
                                         int io_fd, 
                                         uint32_t dom,
                                         unsigned long p2m_size,
                                         shared_info_any_t *live_shinfo)
{
    vcpu_guest_context_any_t ctxt;

    /* Double and single indirect references to the live P2M table */
    void *live_p2m_frame_list_list = NULL;
    void *live_p2m_frame_list = NULL;

    /* Copies of the above. */
    xen_pfn_t *p2m_frame_list_list = NULL;
    xen_pfn_t *p2m_frame_list = NULL;

    /* The mapping of the live p2m table itself */
    xen_pfn_t *p2m = NULL;

    int i, success = 0;

    live_p2m_frame_list_list = map_frame_list_list(xc_handle, dom,
                                                   live_shinfo);
    if ( !live_p2m_frame_list_list )
        goto out;

    /* Get a local copy of the live_P2M_frame_list_list */
    if ( !(p2m_frame_list_list = malloc(PAGE_SIZE)) )
    {
        ERROR("Couldn't allocate p2m_frame_list_list array");
        goto out;
    }
    memcpy(p2m_frame_list_list, live_p2m_frame_list_list, PAGE_SIZE);

    /* Canonicalize guest's unsigned long vs ours */
    if ( guest_width > sizeof(unsigned long) )
        for ( i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++ )
            if ( i < PAGE_SIZE/guest_width )
                p2m_frame_list_list[i] = ((uint64_t *)p2m_frame_list_list)[i];
            else
                p2m_frame_list_list[i] = 0;
    else if ( guest_width < sizeof(unsigned long) )
        for ( i = PAGE_SIZE/sizeof(unsigned long) - 1; i >= 0; i-- )
            p2m_frame_list_list[i] = ((uint32_t *)p2m_frame_list_list)[i];

    live_p2m_frame_list =
        xc_map_foreign_batch(xc_handle, dom, PROT_READ,
                             p2m_frame_list_list,
                             P2M_FLL_ENTRIES);
    if ( !live_p2m_frame_list )
    {
        ERROR("Couldn't map p2m_frame_list");
        goto out;
    }

    /* Get a local copy of the live_P2M_frame_list */
    if ( !(p2m_frame_list = malloc(P2M_TOOLS_FL_SIZE)) )
    {
        ERROR("Couldn't allocate p2m_frame_list array");
        goto out;
    }
    memset(p2m_frame_list, 0, P2M_TOOLS_FL_SIZE);
    memcpy(p2m_frame_list, live_p2m_frame_list, P2M_GUEST_FL_SIZE);

    /* Canonicalize guest's unsigned long vs ours */
    if ( guest_width > sizeof(unsigned long) )
        for ( i = 0; i < P2M_FL_ENTRIES; i++ )
            p2m_frame_list[i] = ((uint64_t *)p2m_frame_list)[i];
    else if ( guest_width < sizeof(unsigned long) )
        for ( i = P2M_FL_ENTRIES - 1; i >= 0; i-- )
            p2m_frame_list[i] = ((uint32_t *)p2m_frame_list)[i];


    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
       the guest must not change which frames are used for this purpose.
       (It's not clear why it would want to change them, and we'll be OK
       from a safety POV anyhow.) */

    p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
                               p2m_frame_list,
                               P2M_FL_ENTRIES);
    if ( !p2m )
    {
        ERROR("Couldn't map p2m table");
        goto out;
    }
    live_p2m = p2m; /* So that translation macros will work */
    
    /* Canonicalise the pfn-to-mfn table frame-number list. */
    for ( i = 0; i < p2m_size; i += FPP )
    {
        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(p2m_frame_list[i/FPP]) )
        {
            ERROR("Frame# in pfn-to-mfn frame list is not in pseudophys");
            ERROR("entry %d: p2m_frame_list[%ld] is 0x%"PRIx64", max 0x%lx",
                  i, i/FPP, (uint64_t)p2m_frame_list[i/FPP], max_mfn);
            if ( p2m_frame_list[i/FPP] < max_mfn ) 
            {
                ERROR("m2p[0x%"PRIx64"] = 0x%"PRIx64, 
                      (uint64_t)p2m_frame_list[i/FPP],
                      (uint64_t)live_m2p[p2m_frame_list[i/FPP]]);
                ERROR("p2m[0x%"PRIx64"] = 0x%"PRIx64, 
                      (uint64_t)live_m2p[p2m_frame_list[i/FPP]],
                      (uint64_t)p2m[live_m2p[p2m_frame_list[i/FPP]]]);

            }
            goto out;
        }
        p2m_frame_list[i/FPP] = mfn_to_pfn(p2m_frame_list[i/FPP]);
    }

    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
    {
        ERROR("Could not get vcpu context");
        goto out;
    }

    /*
     * Write an extended-info structure to inform the restore code that
     * a PAE guest understands extended CR3 (PDPTs above 4GB). Turns off
     * slow paths in the restore code.
     */
    {
        unsigned long signature = ~0UL;
        uint32_t chunk1_sz = ((guest_width==8) 
                              ? sizeof(ctxt.x64) 
                              : sizeof(ctxt.x32));
        uint32_t chunk2_sz = 0;
        uint32_t tot_sz    = (chunk1_sz + 8) + (chunk2_sz + 8);
        if ( write_exact(io_fd, &signature, sizeof(signature)) ||
             write_exact(io_fd, &tot_sz, sizeof(tot_sz)) ||
             write_exact(io_fd, "vcpu", 4) ||
             write_exact(io_fd, &chunk1_sz, sizeof(chunk1_sz)) ||
             write_exact(io_fd, &ctxt, chunk1_sz) ||
             write_exact(io_fd, "extv", 4) ||
             write_exact(io_fd, &chunk2_sz, sizeof(chunk2_sz)) )
        {
            PERROR("write: extended info");
            goto out;
        }
    }

    if ( write_exact(io_fd, p2m_frame_list, 
                     P2M_FL_ENTRIES * sizeof(xen_pfn_t)) )
    {
        PERROR("write: p2m_frame_list");
        goto out;
    }

    success = 1;

 out:
    
    if ( !success && p2m )
        munmap(p2m, P2M_FLL_ENTRIES * PAGE_SIZE);

    if ( live_p2m_frame_list_list )
        munmap(live_p2m_frame_list_list, PAGE_SIZE);

    if ( live_p2m_frame_list )
        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);

    if ( p2m_frame_list_list ) 
        free(p2m_frame_list_list);

    if ( p2m_frame_list ) 
        free(p2m_frame_list);

    return success ? p2m : NULL;
}

int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
                   uint32_t max_factor, uint32_t flags, int (*suspend)(void),
                   int hvm, void *(*init_qemu_maps)(int, unsigned), 
                   void (*qemu_flip_buffer)(int, int))
{
    xc_dominfo_t info;
    DECLARE_DOMCTL;
    unsigned long count=0;
  
  
    int rc = 1, frc, i, j, last_iter, iter = 0;
    int live  = (flags & XCFLAGS_LIVE);
    int debug = (flags & XCFLAGS_DEBUG);
    int race = 0, sent_last_iter, skip_this_iter;

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_any_t ctxt;

    /* A table containing the type of each PFN (/not/ MFN!). */
    unsigned long *pfn_type = NULL;
    unsigned long *pfn_batch = NULL;

    /* A copy of one frame of guest memory. */
    char page[PAGE_SIZE];

    /* Live mapping of shared info structure */
    shared_info_any_t *live_shinfo = NULL;

    /* base of the region in which domain memory is mapped */
    unsigned char *region_base = NULL;

    /* bitmap of pages:
       - that should be sent this iteration (unless later marked as skip);
       - to skip this iteration because already dirty;
       - to fixup by sending at the end if not already resent; */
    unsigned long *to_send = NULL, *to_skip = NULL, *to_fix = NULL;

    xc_shadow_op_stats_t stats;

    unsigned long needed_to_fix = 0;
    unsigned long total_sent    = 0;

    uint64_t vcpumap = 1ULL;

    /* HVM: a buffer for holding HVM context */
    uint32_t hvm_buf_size = 0;
    uint8_t *hvm_buf = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms. */
    uint64_t magic_pfns[3]; /* ioreq_pfn, bufioreq_pfn, store_pfn */

    unsigned long mfn;

    /* If no explicit control parameters given, use defaults */
    max_iters  = max_iters  ? : DEF_MAX_ITERS;
    max_factor = max_factor ? : DEF_MAX_FACTOR;

    initialize_mbit_rate();

    if ( !get_platform_info(xc_handle, dom,
                            &max_mfn, &hvirt_start, &pt_levels, &guest_width) )
    {
        ERROR("Unable to get platform info.");
        return 1;
    }

    if ( xc_domain_getinfo(xc_handle, dom, 1, &info) != 1 )
    {
        ERROR("Could not get domain info");
        return 1;
    }

    shared_info_frame = info.shared_info_frame;

    /* Map the shared info frame */
    if ( !hvm )
    {
        live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                           PROT_READ, shared_info_frame);
        if ( !live_shinfo )
        {
            ERROR("Couldn't map live_shinfo");
            goto out;
        }
    }

    /* Get the size of the P2M table */
    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom) + 1;

    /* Setup to_send / to_fix and to_skip bitmaps */
    to_send = xg_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
    to_fix  = calloc(1, BITMAP_SIZE);
    to_skip = xg_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 

    if ( !to_send || !to_fix || !to_skip )
    {
        ERROR("Couldn't allocate to_send array");
        goto out;
    }
/***************************************************begin of testing memory writable work set**********************************/
#ifdef TEST_WWS

    if ( lock_pages(to_send, BITMAP_SIZE) )
    {
        ERROR("Unable to lock to_send");
        return 1;
    }

    /* (to fix is local only) */
    if ( lock_pages(to_skip, BITMAP_SIZE) )
    {
        ERROR("Unable to lock to_skip");
        return 1;
    }


    /*Enable log-dirty mode. */
    if ( xc_shadow_control(xc_handle, dom,
                               XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                               NULL, 0, NULL, 0, NULL) < 0 )
    {
            /* log-dirty already enabled? There's no test op,
               so attempt to disable then reenable it */
        frc = xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_OFF,
                                    NULL, 0, NULL, 0, NULL);
        if ( frc >= 0 )
        {
            frc = xc_shadow_control(xc_handle, dom,
                                        XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                                        NULL, 0, NULL, 0, NULL);
        }
            
        if ( frc < 0 )
        {
            ERROR("Couldn't enable shadow mode (rc %d) (errno %d)", frc, errno );
            goto out;
        }
    }

    /* Flush the log-dirty bitmap, wait 10 seconds, then peek at the bitmap
       and count how many distinct pages the guest wrote in that window
       (an estimate of its writable working set). */
    if ( xc_shadow_control(xc_handle, dom,
                           XEN_DOMCTL_SHADOW_OP_CLEAN, to_send,
                           p2m_size, NULL, 0, &stats) != p2m_size )
    {
        ERROR("Error flushing shadow PT");
        goto out;
    }
    if ( xc_shadow_control(xc_handle, dom,
                           XEN_DOMCTL_SHADOW_OP_CLEAN, to_fix,
                           p2m_size, NULL, 0, &stats) != p2m_size )
    {
        ERROR("Error flushing shadow PT");
        goto out;
    }

    sleep(10);

    count = 0;
    frc = xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
                            to_send, p2m_size, NULL, 0, NULL);
    if ( frc != p2m_size )
    {
        ERROR("Error peeking to_send shadow bitmap");
        goto out;
    }
    for ( j = 0; j < p2m_size; j++ )
        if ( test_bit(j, to_send) == 1 )
            count++;
    DPRINTF(" \n delay time 10(s), To_send page number is, %ld", count);

    count = 0;
    frc = xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
                            to_skip, p2m_size, NULL, 0, NULL);
    if ( frc != p2m_size )
    {
        ERROR("Error peeking to_skip shadow bitmap");
        goto out;
    }
    for ( j = 0; j < p2m_size; j++ )
        if ( test_bit(j, to_skip) == 1 )
            count++;
    DPRINTF(" \n delay time 10(s), To_skip page number is, %ld", count);
#endif
/***************************************************end of testing memory writable work set**********************************/



#ifdef TEST_MEMORY_DIRTY
/***************************************************begin of testing memory dirty rate **********************************/
    /* struct timeval time_start, time_end;
       long long testtime; */

    if ( lock_pages(to_send, BITMAP_SIZE) )
    {
        ERROR("Unable to lock to_send");
        return 1;
    }

    /* (to fix is local only) */
    if ( lock_pages(to_skip, BITMAP_SIZE) )
    {
        ERROR("Unable to lock to_skip");
        return 1;
    }


    /*Enable log-dirty mode. */
    if ( xc_shadow_control(xc_handle, dom,
                               XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                               NULL, 0, NULL, 0, NULL) < 0 )
    {
            /* log-dirty already enabled? There's no test op,
               so attempt to disable then reenable it */
        frc = xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_OFF,
                                    NULL, 0, NULL, 0, NULL);
        if ( frc >= 0 )
        {
            frc = xc_shadow_control(xc_handle, dom,
                                        XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                                        NULL, 0, NULL, 0, NULL);
        }
            
        if ( frc < 0 )
        {
            ERROR("Couldn't enable shadow mode (rc %d) (errno %d)", frc, errno );
            goto out;
        }
    }

    /* Sample the dirty-page rate: clear the log-dirty bitmap, wait 500 ms,
       peek at it again and count how many pages were dirtied.  Repeat 120
       times (roughly one minute of samples). */
    for ( i = 0; i < 120; i++ )
    {
        count = 0;

        if ( xc_shadow_control(xc_handle, dom,
                               XEN_DOMCTL_SHADOW_OP_CLEAN, to_send,
                               p2m_size, NULL, 0, &stats) != p2m_size )
        {
            ERROR("Error flushing shadow PT");
            goto out;
        }

        usleep(500000);

        frc = xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
                                to_send, p2m_size, NULL, 0, NULL);
        if ( frc != p2m_size )
        {
            ERROR("Error peeking shadow bitmap");
            goto out;
        }

        for ( j = 0; j < p2m_size; j++ )
            if ( test_bit(j, to_send) == 1 )
                count++;

        /* DPRINTF(" \n %d,delay time(ms), %ld,  dirty page number is, %ld, normalized dirty rate,%f", i, delay/1000, count, new_rate); */
        DPRINTF(" \n %d,peeking delay time(500ms),  dirty page number is, %ld", i, count);
    }
/*************************************************** end of testing memory dirty rate **********************************/
#endif

/* Domain is still running at this point */

    if ( live )
    {
        /* Live suspend. Enable log-dirty mode. */
        if ( xc_shadow_control(xc_handle, dom,
                               XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                               NULL, 0, NULL, 0, NULL) < 0 )
        {
            /* log-dirty already enabled? There's no test op,
               so attempt to disable then reenable it */
            frc = xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_OFF,
                                    NULL, 0, NULL, 0, NULL);
            if ( frc >= 0 )
            {
                frc = xc_shadow_control(xc_handle, dom,
                                        XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                                        NULL, 0, NULL, 0, NULL);
            }
            
            if ( frc < 0 )
            {
                ERROR("Couldn't enable shadow mode (rc %d) (errno %d)", frc, errno );
                goto out;
            }
        }

        if ( hvm )
        {
            /* Get qemu-dm logging dirty pages too */
            void *seg = init_qemu_maps(dom, BITMAP_SIZE);
            qemu_bitmaps[0] = seg;
            qemu_bitmaps[1] = seg + BITMAP_SIZE;
            qemu_active = 0;
            qemu_non_active = 1;
        }
    }
    else
    {
        /* This is a non-live suspend. Suspend the domain. */
        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
        {
            ERROR("Domain appears not to have suspended");
            goto out;
        }
    }

    last_iter = !live;

    /* pretend we sent all the pages last iteration */
    sent_last_iter = p2m_size;



    memset(to_send, 0xff, BITMAP_SIZE);

    if ( lock_pages(to_send, BITMAP_SIZE) )
    {
        ERROR("Unable to lock to_send");
        return 1;
    } 

    /* (to fix is local only) */
    if ( lock_pages(to_skip, BITMAP_SIZE) )
    {
        ERROR("Unable to lock to_skip");
        return 1;
    }

    if ( hvm ) 
    {
        /* Need another buffer for HVM context */
        hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
        if ( hvm_buf_size == -1 )
        {
            ERROR("Couldn't get HVM context size from Xen");
            goto out;
        }
        hvm_buf = malloc(hvm_buf_size);
        if ( !hvm_buf )
        {
            ERROR("Couldn't allocate memory");
            goto out;
        }
    }

    analysis_phase(xc_handle, dom, p2m_size, to_skip, 0);

    pfn_type   = xg_memalign(PAGE_SIZE, ROUNDUP(
                              MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));
    pfn_batch  = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
    if ( (pfn_type == NULL) || (pfn_batch == NULL) )
    {
        ERROR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
        errno = ENOMEM;
        goto out;
    }
    memset(pfn_type, 0,
           ROUNDUP(MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));

    if ( lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
    {
        ERROR("Unable to lock pfn_type array");
        goto out;
    }

    /* Setup the mfn_to_pfn table mapping */
    if ( !(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ)) )
    {
        ERROR("Failed to map live M2P table");
        goto out;
    }

    /* Start writing out the saved-domain record. */
    if ( write_exact(io_fd, &p2m_size, sizeof(unsigned long)) )
    {
        PERROR("write: p2m_size");
        goto out;
    }

    if ( !hvm )
    {
        int err = 0;

        /* Map the P2M table, and write the list of P2M frames */
        live_p2m = map_and_save_p2m_table(xc_handle, io_fd, dom, 
                                          p2m_size, live_shinfo);
        if ( live_p2m == NULL )
        {
            ERROR("Failed to map/save the p2m frame list");
            goto out;
        }

        /*
         * Quick belt and braces sanity check.
         */
        
        for ( i = 0; i < p2m_size; i++ )
        {
            mfn = pfn_to_mfn(i);
            if( (mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i) )
            {
                DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i,
                        mfn, mfn_to_pfn(mfn));
                err++;
            }
        }
        DPRINTF("Had %d unexplained entries in p2m table\n", err);
    }

    print_stats(xc_handle, dom, 0, &stats, 0);

    /* Now write out each data page, canonicalising page tables as we go... */
    for ( ; ; )
    {
        unsigned int prev_pc, sent_this_iter, N, batch, run;

        iter++;
        sent_this_iter = 0;
        skip_this_iter = 0;
        prev_pc = 0;
        N = 0;

        DPRINTF("Saving memory pages: iter %d   0%%", iter);

        while ( N < p2m_size )
        {
            unsigned int this_pc = (N * 100) / p2m_size;

            if ( (this_pc - prev_pc) >= 5 )
            {
                DPRINTF("\b\b\b\b%3d%%", this_pc);
                prev_pc = this_pc;
            }

            if ( !last_iter )
            {
                /* Slightly wasteful to peek the whole array every time,
                   but this is fast enough for the moment. */
                frc = xc_shadow_control(
                    xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK, to_skip, 
                    p2m_size, NULL, 0, NULL);
                if ( frc != p2m_size )
                {
                    ERROR("Error peeking shadow bitmap");
                    goto out;
                }
            }

            /* load pfn_type[] with the mfn of all the pages we're doing in
               this batch. */
            for  ( batch = 0;
                   (batch < MAX_BATCH_SIZE) && (N < p2m_size);
                   N++ )
            {
                int n = N;

                if ( debug )
                {
                    DPRINTF("%d pfn= %08lx mfn= %08lx %d",
                            iter, (unsigned long)n,
                            hvm ? 0 : pfn_to_mfn(n),
                            test_bit(n, to_send));
                    if ( !hvm && is_mapped(pfn_to_mfn(n)) )
                        DPRINTF("  [mfn]= %08lx",
                                mfn_to_pfn(pfn_to_mfn(n)&0xFFFFF));
                    DPRINTF("\n");
                }
                if ( !last_iter &&
                     test_bit(n, to_send) &&
                     test_bit(n, to_skip) )
                    skip_this_iter++; /* stats keeping */

                if ( !((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
                       (test_bit(n, to_send) && last_iter) ||
                       (test_bit(n, to_fix)  && last_iter)) )
                    continue;

                /*
                ** we get here if:
                **  1. page is marked to_send & hasn't already been re-dirtied
                **  2. (ignore to_skip in last iteration)
                **  3. add in pages that still need fixup (net bufs)
                */

                pfn_batch[batch] = n;

                /* Hypercall interfaces operate in PFNs for HVM guests
                * and MFNs for PV guests */
                if ( hvm ) 
                    pfn_type[batch] = n;
                else
                    pfn_type[batch] = pfn_to_mfn(n);
                    
                if ( !is_mapped(pfn_type[batch]) )
                {
                    /*
                    ** not currently in pseudo-physical map -- set bit
                    ** in to_fix since we must send this page in last_iter
                    ** unless it's sent sooner anyhow, or it never enters
                    ** pseudo-physical map (e.g. for ballooned down doms)
                    */
                    set_bit(n, to_fix);
                    continue;
                }

                if ( last_iter &&
                     test_bit(n, to_fix) &&
                     !test_bit(n, to_send) )
                {
                    needed_to_fix++;
                    DPRINTF("Fix! iter %d, pfn %x. mfn %lx\n",
                            iter, n, pfn_type[batch]);
                }
                
                clear_bit(n, to_fix);
                
                batch++;
            }

            if ( batch == 0 )
                goto skip; /* vanishingly unlikely... */

            region_base = xc_map_foreign_batch(
                xc_handle, dom, PROT_READ, pfn_type, batch);
            if ( region_base == NULL )
            {
                ERROR("map batch failed");
                goto out;
            }

            if ( hvm )
            {
                /* Look for and skip completely empty batches. */
                for ( j = 0; j < batch; j++ )
                    if ( (pfn_type[j] & XEN_DOMCTL_PFINFO_LTAB_MASK) !=
                         XEN_DOMCTL_PFINFO_XTAB )
                        break;
                if ( j == batch )
                {
                    munmap(region_base, batch*PAGE_SIZE);
                    continue; /* bail on this batch: no valid pages */
                }
            }
            else
            {
                /* Get page types */
                for ( j = 0; j < batch; j++ )
                    ((uint32_t *)pfn_type)[j] = pfn_type[j];
                if ( xc_get_pfn_type_batch(xc_handle, dom, batch,
                                           (uint32_t *)pfn_type) )
                {
                    ERROR("get_pfn_type_batch failed");
                    goto out;
                }
                for ( j = batch-1; j >= 0; j-- )
                    pfn_type[j] = ((uint32_t *)pfn_type)[j];

                for ( j = 0; j < batch; j++ )
                {
                    
                    if ( (pfn_type[j] & XEN_DOMCTL_PFINFO_LTAB_MASK) ==
                         XEN_DOMCTL_PFINFO_XTAB )
                    {
                        DPRINTF("type fail: page %i mfn %08lx\n", 
                                j, pfn_type[j]);
                        continue;
                    }
                    
                    if ( debug )
                        DPRINTF("%d pfn= %08lx mfn= %08lx [mfn]= %08lx"
                                " sum= %08lx\n",
                                iter,
                                (pfn_type[j] & XEN_DOMCTL_PFINFO_LTAB_MASK) |
                                pfn_batch[j],
                                pfn_type[j],
                                mfn_to_pfn(pfn_type[j] &
                                           ~XEN_DOMCTL_PFINFO_LTAB_MASK),
                                csum_page(region_base + (PAGE_SIZE*j)));
                    
                    /* canonicalise mfn->pfn */
                    pfn_type[j] = (pfn_type[j] & XEN_DOMCTL_PFINFO_LTAB_MASK) |
                        pfn_batch[j];
                }
            }

            if ( write_exact(io_fd, &batch, sizeof(unsigned int)) )
            {
                PERROR("Error when writing to state file (2)");
                goto out;
            }

            if ( write_exact(io_fd, pfn_type, sizeof(unsigned long)*batch) )
            {
                PERROR("Error when writing to state file (3)");
                goto out;
            }

            /* entering this loop, pfn_type is now in pfns (Not mfns) */
            run = 0;
            for ( j = 0; j < batch; j++ )
            {
                unsigned long pfn, pagetype;
                void *spage = (char *)region_base + (PAGE_SIZE*j);

                pfn      = pfn_type[j] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
                pagetype = pfn_type[j] &  XEN_DOMCTL_PFINFO_LTAB_MASK;

                if ( pagetype != 0 )
                {
                    /* If the page is not a normal data page, write out any
                       run of pages we may have previously accumulated */
                    if ( run )
                    {
                        if ( ratewrite(io_fd, live, 
                                       (char*)region_base+(PAGE_SIZE*(j-run)), 
                                       PAGE_SIZE*run) != PAGE_SIZE*run )
                        {
                            ERROR("Error when writing to state file (4a)"
                                  " (errno %d)", errno);
                            goto out;
                        }                        
                        run = 0;
                    }
                }

                /* skip pages that aren't present */
                if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
                    continue;

                pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;

                if ( (pagetype >= XEN_DOMCTL_PFINFO_L1TAB) &&
                     (pagetype <= XEN_DOMCTL_PFINFO_L4TAB) )
                {
                    /* We have a pagetable page: need to rewrite it. */
                    race = 
                        canonicalize_pagetable(pagetype, pfn, spage, page); 

                    if ( race && !live )
                    {
                        ERROR("Fatal PT race (pfn %lx, type %08lx)", pfn,
                              pagetype);
                        goto out;
                    }

                    if ( ratewrite(io_fd, live, page, PAGE_SIZE) != PAGE_SIZE )
                    {
                        ERROR("Error when writing to state file (4b)"
                              " (errno %d)", errno);
                        goto out;
                    }
                }
                else
                {
                    /* We have a normal page: accumulate it for writing. */
                    run++;
                }
            } /* end of the write out for this batch */

            if ( run )
            {
                /* write out the last accumulated run of pages */
                if ( ratewrite(io_fd, live, 
                               (char*)region_base+(PAGE_SIZE*(j-run)), 
                               PAGE_SIZE*run) != PAGE_SIZE*run )
                {
                    ERROR("Error when writing to state file (4c)"
                          " (errno %d)", errno);
                    goto out;
                }                        
            }

            sent_this_iter += batch;

            munmap(region_base, batch*PAGE_SIZE);

        } /* end of this while loop for this iteration */

      skip:

        total_sent += sent_this_iter;

        DPRINTF("\r %d: sent %d, skipped %d, ",
                iter, sent_this_iter, skip_this_iter );

        if ( last_iter )
        {
            print_stats( xc_handle, dom, sent_this_iter, &stats, 1);

            DPRINTF("Total pages sent= %ld (%.2fx)\n",
                    total_sent, ((float)total_sent)/p2m_size );
            DPRINTF("(of which %ld were fixups)\n", needed_to_fix  );
        }

        if ( last_iter && debug )
        {
            int minusone = -1;
            memset(to_send, 0xff, BITMAP_SIZE);
            debug = 0;
            DPRINTF("Entering debug resend-all mode\n");

            /* send "-1" to put receiver into debug mode */
            if ( write_exact(io_fd, &minusone, sizeof(int)) )
            {
                PERROR("Error when writing to state file (6)");
                goto out;
            }

            continue;
        }

        if ( last_iter )
            break;

        if ( live )
        {
            if ( ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
                 (iter >= max_iters) ||
                 (sent_this_iter+skip_this_iter < 50) ||
                 (total_sent > p2m_size*max_factor) )
            {
                DPRINTF("Start last iteration\n");
                last_iter = 1;

                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
                {
                    ERROR("Domain appears not to have suspended");
                    goto out;
                }

                DPRINTF("SUSPEND shinfo %08lx\n", info.shared_info_frame);
            }

            if ( xc_shadow_control(xc_handle, dom, 
                                   XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
                                   p2m_size, NULL, 0, &stats) != p2m_size )
            {
                ERROR("Error flushing shadow PT");
                goto out;
            }

            if ( hvm ) 
            {
                /* Pull in the dirty bits from qemu-dm too */
                if ( !last_iter )
                {
                    qemu_active = qemu_non_active;
                    qemu_non_active = qemu_active ? 0 : 1;
                    qemu_flip_buffer(dom, qemu_active);
                    for ( j = 0; j < BITMAP_SIZE / sizeof(unsigned long); j++ )
                    {
                        to_send[j] |= qemu_bitmaps[qemu_non_active][j];
                        qemu_bitmaps[qemu_non_active][j] = 0;
                    }
                }
                else
                {
                    for ( j = 0; j < BITMAP_SIZE / sizeof(unsigned long); j++ )
                        to_send[j] |= qemu_bitmaps[qemu_active][j];
                }
            }

            sent_last_iter = sent_this_iter;

            print_stats(xc_handle, dom, sent_this_iter, &stats, 1);

        }
    } /* end of infinite for loop */

    DPRINTF("All memory is saved\n");

    {
        struct {
            int minustwo;
            int max_vcpu_id;
            uint64_t vcpumap;
        } chunk = { -2, info.max_vcpu_id };

        if ( info.max_vcpu_id >= 64 )
        {
            ERROR("Too many VCPUS in guest!");
            goto out;
        }

        for ( i = 1; i <= info.max_vcpu_id; i++ )
        {
            xc_vcpuinfo_t vinfo;
            if ( (xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) &&
                 vinfo.online )
                vcpumap |= 1ULL << i;
        }

        chunk.vcpumap = vcpumap;
        if ( write_exact(io_fd, &chunk, sizeof(chunk)) )
        {
            PERROR("Error when writing to state file");
            goto out;
        }
    }

    if ( hvm )
    {
        struct {
            int id;
            uint32_t pad;
            uint64_t data;
        } chunk = { 0, };

        chunk.id = -3;
        xc_get_hvm_param(xc_handle, dom, HVM_PARAM_IDENT_PT,
                         (unsigned long *)&chunk.data);

        if ( (chunk.data != 0) &&
             write_exact(io_fd, &chunk, sizeof(chunk)) )
        {
            PERROR("Error when writing the ident_pt for EPT guest");
            goto out;
        }

        chunk.id = -4;
        xc_get_hvm_param(xc_handle, dom, HVM_PARAM_VM86_TSS,
                         (unsigned long *)&chunk.data);

        if ( (chunk.data != 0) &&
             write_exact(io_fd, &chunk, sizeof(chunk)) )
        {
            PERROR("Error when writing the vm86 TSS for guest");
            goto out;
        }
    }

    /* Zero terminate */
    i = 0;
    if ( write_exact(io_fd, &i, sizeof(int)) )
    {
        PERROR("Error when writing to state file (6')");
        goto out;
    }

    if ( hvm ) 
    {
        uint32_t rec_size;

        /* Save magic-page locations. */
        memset(magic_pfns, 0, sizeof(magic_pfns));
        xc_get_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN,
                         (unsigned long *)&magic_pfns[0]);
        xc_get_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN,
                         (unsigned long *)&magic_pfns[1]);
        xc_get_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN,
                         (unsigned long *)&magic_pfns[2]);
        if ( write_exact(io_fd, magic_pfns, sizeof(magic_pfns)) )
        {
            PERROR("Error when writing to state file (7)");
            goto out;
        }

        /* Get HVM context from Xen and save it too */
        if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, 
                                                  hvm_buf_size)) == -1 )
        {
            ERROR("HVM:Could not get hvm buffer");
            goto out;
        }
        
        if ( write_exact(io_fd, &rec_size, sizeof(uint32_t)) )
        {
            PERROR("error write hvm buffer size");
            goto out;
        }
        
        if ( write_exact(io_fd, hvm_buf, rec_size) )
        {
            PERROR("write HVM info failed!\n");
            goto out;
        }
        
        /* HVM guests are done now */
        rc = 0;
        goto out;
    }

    /* PV guests only from now on */

    /* Send through a list of all the PFNs that were not in map at the close */
    {
        unsigned int i,j;
        unsigned long pfntab[1024];

        for ( i = 0, j = 0; i < p2m_size; i++ )
        {
            if ( !is_mapped(pfn_to_mfn(i)) )
                j++;
        }

        if ( write_exact(io_fd, &j, sizeof(unsigned int)) )
        {
            PERROR("Error when writing to state file (6a)");
            goto out;
        }

        for ( i = 0, j = 0; i < p2m_size; )
        {
            if ( !is_mapped(pfn_to_mfn(i)) )
                pfntab[j++] = i;

            i++;
            if ( (j == 1024) || (i == p2m_size) )
            {
                if ( write_exact(io_fd, &pfntab, sizeof(unsigned long)*j) )
                {
                    PERROR("Error when writing to state file (6b)");
                    goto out;
                }
                j = 0;
            }
        }
    }

    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
    {
        ERROR("Could not get vcpu context");
        goto out;
    }

    /* Canonicalise the suspend-record frame number. */
    mfn = GET_FIELD(&ctxt, user_regs.edx);
    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
    {
        ERROR("Suspend record is not in range of pseudophys map");
        goto out;
    }
    SET_FIELD(&ctxt, user_regs.edx, mfn_to_pfn(mfn));

    for ( i = 0; i <= info.max_vcpu_id; i++ )
    {
        if ( !(vcpumap & (1ULL << i)) )
            continue;

        if ( (i != 0) && xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
        {
            ERROR("No context for VCPU%d", i);
            goto out;
        }

        /* Canonicalise each GDT frame number. */
        for ( j = 0; (512*j) < GET_FIELD(&ctxt, gdt_ents); j++ )
        {
            mfn = GET_FIELD(&ctxt, gdt_frames[j]);
            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
            {
                ERROR("GDT frame is not in range of pseudophys map");
                goto out;
            }
            SET_FIELD(&ctxt, gdt_frames[j], mfn_to_pfn(mfn));
        }

        /* Canonicalise the page table base pointer. */
        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(UNFOLD_CR3(
                                           GET_FIELD(&ctxt, ctrlreg[3]))) )
        {
            ERROR("PT base is not in range of pseudophys map");
            goto out;
        }
        SET_FIELD(&ctxt, ctrlreg[3], 
            FOLD_CR3(mfn_to_pfn(UNFOLD_CR3(GET_FIELD(&ctxt, ctrlreg[3])))));

        /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
        if ( (pt_levels == 4) && ctxt.x64.ctrlreg[1] )
        {
            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(UNFOLD_CR3(ctxt.x64.ctrlreg[1])) )
            {
                ERROR("PT base is not in range of pseudophys map");
                goto out;
            }
            /* Least-significant bit means 'valid PFN'. */
            ctxt.x64.ctrlreg[1] = 1 |
                FOLD_CR3(mfn_to_pfn(UNFOLD_CR3(ctxt.x64.ctrlreg[1])));
        }

        if ( write_exact(io_fd, &ctxt, ((guest_width==8) 
                                        ? sizeof(ctxt.x64) 
                                        : sizeof(ctxt.x32))) )
        {
            PERROR("Error when writing to state file (1)");
            goto out;
        }

        domctl.cmd = XEN_DOMCTL_get_ext_vcpucontext;
        domctl.domain = dom;
        domctl.u.ext_vcpucontext.vcpu = i;
        if ( xc_domctl(xc_handle, &domctl) < 0 )
        {
            ERROR("No extended context for VCPU%d", i);
            goto out;
        }
        if ( write_exact(io_fd, &domctl.u.ext_vcpucontext, 128) )
        {
            PERROR("Error when writing to state file (2)");
            goto out;
        }
    }

    /*
     * Reset the MFN to be a known-invalid value. See map_frame_list_list().
     */
    memcpy(page, live_shinfo, PAGE_SIZE);
    SET_FIELD(((shared_info_any_t *)page), 
              arch.pfn_to_mfn_frame_list_list, 0);
    if ( write_exact(io_fd, page, PAGE_SIZE) )
    {
        PERROR("Error when writing to state file (1)");
        goto out;
    }

    /* Success! */
    rc = 0;

 out:

    if ( live )
    {
        if ( xc_shadow_control(xc_handle, dom, 
                               XEN_DOMCTL_SHADOW_OP_OFF,
                               NULL, 0, NULL, 0, NULL) < 0 )
            DPRINTF("Warning - couldn't disable shadow mode");
    }

    /* Flush last write and discard cache for file. */
    discard_file_cache(io_fd, 1 /* flush */);

    if ( live_shinfo )
        munmap(live_shinfo, PAGE_SIZE);

    if ( live_p2m )
        munmap(live_p2m, P2M_FLL_ENTRIES * PAGE_SIZE);

    if ( live_m2p )
        munmap(live_m2p, M2P_SIZE(max_mfn));

    free(pfn_type);
    free(pfn_batch);
    free(to_send);
    free(to_fix);
    free(to_skip);

    DPRINTF("Save exit rc=%d\n",rc);

    return !!rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */

My work is based on Xen 3.4. To measure the memory dirtying rate, modify the attached file "xc_domain_save.c"; the details are in the source code and in my paper. The test results should appear in /var/log/xen/xend.log (if I remember the location correctly). The attached file TPCC.txt is an example of the measured output.
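For reference, here is a minimal sketch of how such a dirty-rate sample can be taken with the log-dirty interface that this file already uses (the same eight-argument xc_shadow_control call seen in the cleanup path above). It is not part of the original source: the function name, its parameters, and the fixed sampling window are all hypothetical, and it assumes log-dirty mode has already been enabled for the domain and that "bitmap" holds enough longs to cover nr_pfns page-frame numbers.

/*
 * Illustrative sketch only -- not part of the original Xen code.
 * Clear the log-dirty bitmap, let the guest run for interval_ms
 * milliseconds, then peek at the bitmap and count how many pages
 * were dirtied in that window.  Returns pages dirtied per second,
 * or a negative value on error.
 */
static double sample_dirty_rate(int xc_handle, uint32_t dom,
                                unsigned long *bitmap,
                                unsigned long nr_pfns,
                                unsigned int interval_ms)
{
    const unsigned int bits_per_long = sizeof(unsigned long) * 8;
    unsigned long i, dirty = 0;

    /* Reset the log-dirty bitmap so the sampling window starts clean. */
    if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_CLEAN,
                           bitmap, nr_pfns, NULL, 0, NULL) != nr_pfns )
        return -1.0;

    usleep(interval_ms * 1000);

    /* Read back the bits dirtied during the interval, without clearing. */
    if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
                           bitmap, nr_pfns, NULL, 0, NULL) != nr_pfns )
        return -1.0;

    /* Count the set bits: each one is a page written during the window. */
    for ( i = 0; i < nr_pfns; i++ )
        if ( bitmap[i / bits_per_long] & (1UL << (i % bits_per_long)) )
            dirty++;

    return (double)dirty * 1000.0 / interval_ms;
}

Printing the returned rate with DPRINTF inside xc_domain_save() is presumably how the measurements end up in /var/log/xen/xend.log when the save is driven by xend, since xend captures libxc's diagnostic output there.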


