Diffstat (limited to 'import-layers/meta-virtualization/recipes-extended/xen')
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch  176
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch  109
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch  36
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/lwip.dhcp_create_request-hwaddr_len.patch  13
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/lwip.patch-cvs  2398
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/mini-os_udivmoddi4-gcc7.patch  43
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/newlib-chk.patch  155
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/newlib-stdint-size_max-fix-from-1.17.0.patch  16
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/newlib.patch  727
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/polarssl.patch  64
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/tpmemu-0.7.4.patch  12
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-bufsize.patch  13
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-cmake-Wextra.patch  21
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote-anyloc.patch  127
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote.patch  187
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-implicit-fallthrough.patch  10
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-locality.patch  50
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-parent-sign-ek.patch  196
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/xsa246-4.9.patch  74
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/xsa248.patch  164
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/xsa249.patch  42
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/xsa250.patch  67
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/xsa251.patch  21
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/xsa253.patch  26
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/lwip.inc  24
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/lwip_1.3.0.bb  19
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/mini-os.inc  28
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/mini-os_4.9.0.bb  18
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/newlib.inc  64
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/newlib_1.16.0.bb  21
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/polarssl.inc  27
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/polarssl_1.1.4.bb  19
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp.inc  37
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp_4.3.2.bb  20
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/stubdom.inc  152
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator.inc  37
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator_0.7.4.bb  26
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm.inc  111
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm_4.9.0.bb  21
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen.inc  21
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_4.10.0.bb  12
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb  12
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.1.bb  18
43 files changed, 5378 insertions, 56 deletions
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
new file mode 100644
index 000000000..ad9524a30
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
@@ -0,0 +1,176 @@
+From ad208b8b7e45fb2b7c572b86c61c26412609e82d Mon Sep 17 00:00:00 2001
+From: George Dunlap <george.dunlap@citrix.com>
+Date: Fri, 10 Nov 2017 16:53:54 +0000
+Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
+ worked
+
+The PoD zero-check functions speculatively remove memory from the p2m,
+then check to see if it's completely zeroed, before putting it in the
+cache.
+
+Unfortunately, the p2m_set_entry() calls may fail if the underlying
+pagetable structure needs to change and the domain has exhausted its
+p2m memory pool: for instance, if we're removing a 2MiB region out of
+a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
+region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
+case); and the return value is not checked.
+
+The underlying mfn will then be added into the PoD cache, and at some
+point mapped into another location in the p2m. If the guest
+afterwards balloons out this memory, it will be freed to the hypervisor
+and potentially reused by another domain, in spite of the fact that
+the original domain still has writable mappings to it.
+
+There are several places where p2m_set_entry() shouldn't be able to
+fail, as it is guaranteed to write an entry of the same order that
+succeeded before. Add a backstop of crashing the domain just in case,
+and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
+builds.
+
+While we're here, use PAGE_ORDER_2M rather than a magic constant.
+
+This is part of XSA-247.
+
+Reported-by: George Dunlap <george.dunlap.com>
+Signed-off-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+v4:
+- Removed some trailing whitespace
+v3:
+- Reformat reset clause to be more compact
+- Make sure to set map[i] = NULL when unmapping in case we need to bail
+v2:
+- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
+---
+ xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 61 insertions(+), 16 deletions(-)
+
+diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
+index 730a48f928..f2ed751892 100644
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -752,8 +752,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
+ }
+
+ /* Try to remove the page, restoring old mapping if it fails. */
+- p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
+- p2m_populate_on_demand, p2m->default_access);
++ if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
++ p2m_populate_on_demand, p2m->default_access) )
++ goto out;
++
+ p2m_tlb_flush_sync(p2m);
+
+ /* Make none of the MFNs are used elsewhere... for example, mapped
+@@ -810,9 +812,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
+ ret = SUPERPAGE_PAGES;
+
+ out_reset:
+- if ( reset )
+- p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
+-
++ /*
++ * This p2m_set_entry() call shouldn't be able to fail, since the same order
++ * on the same gfn succeeded above. If that turns out to be false, crashing
++ * the domain should be the safest way of making sure we don't leak memory.
++ */
++ if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
++ type0, p2m->default_access) )
++ {
++ ASSERT_UNREACHABLE();
++ domain_crash(d);
++ }
++
+ out:
+ gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
+ return ret;
+@@ -869,19 +880,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
+ }
+
+ /* Try to remove the page, restoring old mapping if it fails. */
+- p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
+- p2m_populate_on_demand, p2m->default_access);
++ if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
++ p2m_populate_on_demand, p2m->default_access) )
++ goto skip;
+
+ /* See if the page was successfully unmapped. (Allow one refcount
+ * for being allocated to a domain.) */
+ if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
+ {
++ /*
++ * If the previous p2m_set_entry call succeeded, this one shouldn't
++ * be able to fail. If it does, crashing the domain should be safe.
++ */
++ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
++ types[i], p2m->default_access) )
++ {
++ ASSERT_UNREACHABLE();
++ domain_crash(d);
++ goto out_unmap;
++ }
++
++ skip:
+ unmap_domain_page(map[i]);
+ map[i] = NULL;
+
+- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+- types[i], p2m->default_access);
+-
+ continue;
+ }
+ }
+@@ -900,12 +922,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
+
+ unmap_domain_page(map[i]);
+
+- /* See comment in p2m_pod_zero_check_superpage() re gnttab
+- * check timing. */
+- if ( j < PAGE_SIZE/sizeof(*map[i]) )
++ map[i] = NULL;
++
++ /*
++ * See comment in p2m_pod_zero_check_superpage() re gnttab
++ * check timing.
++ */
++ if ( j < (PAGE_SIZE / sizeof(*map[i])) )
+ {
+- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+- types[i], p2m->default_access);
++ /*
++ * If the previous p2m_set_entry call succeeded, this one shouldn't
++ * be able to fail. If it does, crashing the domain should be safe.
++ */
++ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
++ types[i], p2m->default_access) )
++ {
++ ASSERT_UNREACHABLE();
++ domain_crash(d);
++ goto out_unmap;
++ }
+ }
+ else
+ {
+@@ -929,7 +964,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
+ p2m->pod.entry_count++;
+ }
+ }
+-
++
++ return;
++
++out_unmap:
++ /*
++ * Something went wrong, probably crashing the domain. Unmap
++ * everything and return.
++ */
++ for ( i = 0; i < count; i++ )
++ if ( map[i] )
++ unmap_domain_page(map[i]);
+ }
+
+ #define POD_SWEEP_LIMIT 1024
+--
+2.15.0
+
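The patch above applies one defensive idiom throughout: every p2m_set_entry() call that was assumed infallible now has its return value checked, and a failure on a call that "cannot" fail trips ASSERT_UNREACHABLE() on debug builds and crashes the offending domain rather than leaving a still-writable page to be recycled. The following standalone C sketch mirrors that control flow outside of Xen; set_entry_stub() and crash_domain() are hypothetical stand-ins for p2m_set_entry(), ASSERT_UNREACHABLE() and domain_crash(), not real hypervisor interfaces.

    /*
     * Illustrative sketch only -- not part of the patch above.  set_entry_stub()
     * and crash_domain() are hypothetical stand-ins for Xen's p2m_set_entry(),
     * ASSERT_UNREACHABLE() and domain_crash(); only the control flow matters.
     */
    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* A p2m-style update that can fail, e.g. when a memory pool is exhausted. */
    static int set_entry_stub(int force_fail)
    {
        return force_fail ? -1 : 0;          /* 0 on success, nonzero on error */
    }

    static void crash_domain(void)
    {
        fprintf(stderr, "domain crashed: 'impossible' p2m update failed\n");
        exit(EXIT_FAILURE);
    }

    int main(void)
    {
        /* Speculative removal: if it fails, skip the page (the patch's 'goto skip'). */
        if (set_entry_stub(0) != 0) {
            puts("removal failed -> skip page");
            return 0;
        }

        /*
         * Restoring the old mapping reuses the gfn/order that just succeeded, so
         * a failure here means a broken invariant: flag it on debug builds (the
         * assert is compiled out under NDEBUG, much like ASSERT_UNREACHABLE on
         * release builds) and crash the domain instead of leaking the mapping.
         */
        if (set_entry_stub(0) != 0) {
            assert(!"set_entry failed on a previously successful gfn/order");
            crash_domain();
        }

        puts("entry restored");
        return 0;
    }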
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
new file mode 100644
index 000000000..8c850bd7f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
@@ -0,0 +1,109 @@
+From d4bc7833707351a5341a6bdf04c752a028d9560d Mon Sep 17 00:00:00 2001
+From: George Dunlap <george.dunlap@citrix.com>
+Date: Fri, 10 Nov 2017 16:53:55 +0000
+Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
+ decreasing reservation
+
+If the entire range specified to p2m_pod_decrease_reservation() is marked
+populate-on-demand, then it will make a single p2m_set_entry() call,
+reducing its PoD entry count.
+
+Unfortunately, in the right circumstances, this p2m_set_entry() call
+may fail. In that case, repeated calls to decrease_reservation() may
+cause p2m->pod.entry_count to fall below zero, potentially tripping
+over BUG_ON()s to the contrary.
+
+Instead, check to see if the entry succeeded, and return false if not.
+The caller will then call guest_remove_page() on the gfns, which will
+return -EINVAL upon finding no valid memory there to return.
+
+Unfortunately if the order > 0, the entry may have partially changed.
+A domain_crash() is probably the safest thing in that case.
+
+Other p2m_set_entry() calls in the same function should be fine,
+because they are writing the entry at its current order. Nonetheless,
+check the return value and crash if our assumption turns out to be
+wrong.
+
+This is part of XSA-247.
+
+Reported-by: George Dunlap <george.dunlap.com>
+Signed-off-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+v2: Crash the domain if we're not sure it's safe (or if we think it
+can't happen)
+---
+ xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
+ 1 file changed, 33 insertions(+), 9 deletions(-)
+
+diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
+index f2ed751892..473d6a6dbf 100644
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -555,11 +555,23 @@ p2m_pod_decrease_reservation(struct domain *d,
+
+ if ( !nonpod )
+ {
+- /* All PoD: Mark the whole region invalid and tell caller
+- * we're done. */
+- p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
+- p2m->default_access);
+- p2m->pod.entry_count-=(1<<order);
++ /*
++ * All PoD: Mark the whole region invalid and tell caller
++ * we're done.
++ */
++ if ( p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
++ p2m->default_access) )
++ {
++ /*
++ * If this fails, we can't tell how much of the range was changed.
++ * Best to crash the domain unless we're sure a partial change is
++ * impossible.
++ */
++ if ( order != 0 )
++ domain_crash(d);
++ goto out_unlock;
++ }
++ p2m->pod.entry_count -= 1UL << order;
+ BUG_ON(p2m->pod.entry_count < 0);
+ ret = 1;
+ goto out_entry_check;
+@@ -600,8 +612,14 @@ p2m_pod_decrease_reservation(struct domain *d,
+ n = 1UL << cur_order;
+ if ( t == p2m_populate_on_demand )
+ {
+- p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+- p2m_invalid, p2m->default_access);
++ /* This shouldn't be able to fail */
++ if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
++ p2m_invalid, p2m->default_access) )
++ {
++ ASSERT_UNREACHABLE();
++ domain_crash(d);
++ goto out_unlock;
++ }
+ p2m->pod.entry_count -= n;
+ BUG_ON(p2m->pod.entry_count < 0);
+ pod -= n;
+@@ -622,8 +640,14 @@ p2m_pod_decrease_reservation(struct domain *d,
+
+ page = mfn_to_page(mfn);
+
+- p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+- p2m_invalid, p2m->default_access);
++ /* This shouldn't be able to fail */
++ if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
++ p2m_invalid, p2m->default_access) )
++ {
++ ASSERT_UNREACHABLE();
++ domain_crash(d);
++ goto out_unlock;
++ }
+ p2m_tlb_flush_sync(p2m);
+ for ( j = 0; j < n; ++j )
+ set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
+--
+2.15.0
+
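A note on the orders used in both patches: a p2m entry of order n covers 2^n contiguous 4 KiB pages, so order 0 (PAGE_ORDER_4K) is a single page, order 9 (PAGE_ORDER_2M, the magic "9" that patch 1 replaces) is 512 pages = 2 MiB, and a 1 GiB superpage is order 18 (262144 pages). The rewritten accounting line "p2m->pod.entry_count -= 1UL << order;" therefore subtracts exactly that 2^order page count from the PoD entry count, which the BUG_ON(p2m->pod.entry_count < 0) immediately below sanity-checks.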
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch
deleted file mode 100644
index 05016a7a3..000000000
--- a/import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-commit 88bfbf90e35f1213f9967a97dee0b2039f9998a4
-Author: Bernd Kuhls <bernd.kuhls@t-online.de>
-Date: Sat Aug 19 16:21:42 2017 +0200
-
- tools/libxc/xc_dom_arm: add missing variable initialization
-
- The variable domctl.u.address_size.size may remain uninitialized if
- guest_type is not one of xen-3.0-aarch64 or xen-3.0-armv7l. And the
- code precisely checks if this variable is still 0 to decide if the
- guest type is supported or not.
-
- This fixes the following build failure with gcc 7.x:
-
- xc_dom_arm.c:229:31: error: 'domctl.u.address_size.size' may be used uninitialized in this function [-Werror=maybe-uninitialized]
- if ( domctl.u.address_size.size == 0 )
-
- Patch originally taken from
- https://www.mail-archive.com/xen-devel@lists.xen.org/msg109313.html.
-
- Signed-off-by: Bernd Kuhls <bernd.kuhls@t-online.de>
- Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- Acked-by: Wei Liu <wei.liu2@citrix.com>
-
-diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
-index e7d4bd0..e669fb0 100644
---- a/tools/libxc/xc_dom_arm.c
-+++ b/tools/libxc/xc_dom_arm.c
-@@ -223,6 +223,8 @@ static int set_mode(xc_interface *xch, domid_t domid, char *guest_type)
-
- domctl.domain = domid;
- domctl.cmd = XEN_DOMCTL_set_address_size;
-+ domctl.u.address_size.size = 0;
-+
- for ( i = 0; i < ARRAY_SIZE(types); i++ )
- if ( !strcmp(types[i].guest, guest_type) )
- domctl.u.address_size.size = types[i].size;
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/lwip.dhcp_create_request-hwaddr_len.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/lwip.dhcp_create_request-hwaddr_len.patch
new file mode 100644
index 000000000..4bbf21a18
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/lwip.dhcp_create_request-hwaddr_len.patch
@@ -0,0 +1,13 @@
+Index: src/core/dhcp.c
+===================================================================
+--- a/src/core/dhcp.c
++++ b/src/core/dhcp.c
+@@ -1356,7 +1358,7 @@ dhcp_create_request(struct netif *netif)
+ dhcp->msg_out->giaddr.addr = 0;
+ for (i = 0; i < DHCP_CHADDR_LEN; i++) {
+ /* copy netif hardware address, pad with zeroes */
+- dhcp->msg_out->chaddr[i] = (i < netif->hwaddr_len) ? netif->hwaddr[i] : 0/* pad byte*/;
++ dhcp->msg_out->chaddr[i] = (i < (netif->hwaddr_len > NETIF_MAX_HWADDR_LEN ? NETIF_MAX_HWADDR_LEN : netif->hwaddr_len)) ? netif->hwaddr[i] : 0/* pad byte*/;
+ }
+ for (i = 0; i < DHCP_SNAME_LEN; i++) {
+ dhcp->msg_out->sname[i] = 0;
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/lwip.patch-cvs b/import-layers/meta-virtualization/recipes-extended/xen/files/lwip.patch-cvs
new file mode 100644
index 000000000..b2718778c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/lwip.patch-cvs
@@ -0,0 +1,2398 @@
+? .ChangeLog.swp
+? ChangeLog
+Index: CHANGELOG
+===================================================================
+RCS file: /sources/lwip/lwip/CHANGELOG,v
+retrieving revision 1.300
+retrieving revision 1.318
+diff -u -p -r1.300 -r1.318
+--- a/CHANGELOG 23 Mar 2008 13:49:39 -0000 1.300
++++ b/CHANGELOG 14 Jul 2008 20:12:36 -0000 1.318
+@@ -19,9 +19,77 @@ HISTORY
+
+ ++ New features:
+
++ 2008-06-30 Simon Goldschmidt
++ * mem.c, opt.h, stats.h: fixed bug #21433: Calling mem_free/pbuf_free from
++ interrupt context isn't safe: LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT allows
++ mem_free to run between mem_malloc iterations. Added illegal counter for
++ mem stats.
++
++ 2008-06-27 Simon Goldschmidt
++ * stats.h/.c, some other files: patch #6483: stats module improvement:
++ Added defines to display each module's statistic individually, added stats
++ defines for MEM, MEMP and SYS modules, removed (unused) rexmit counter.
++
++ 2008-06-17 Simon Goldschmidt
++ * err.h: patch #6459: Made err_t overridable to use a more efficient type
++ (define LWIP_ERR_T in cc.h)
++
++ 2008-06-17 Simon Goldschmidt
++ * slipif.c: patch #6480: Added a configuration option for slipif for symmetry
++ to loopif
++
++ 2008-06-17 Simon Goldschmidt (patch by Luca Ceresoli)
++ * netif.c, loopif.c, ip.c, netif.h, loopif.h, opt.h: Checked in slightly
++ modified version of patch # 6370: Moved loopif code to netif.c so that
++ loopback traffic is supported on all netifs (all local IPs).
++ Added option to limit loopback packets for each netifs.
++
+
+ ++ Bugfixes:
+
++ 2008-08-14 Simon Goldschmidt
++ * api_msg.c: fixed bug #23847: do_close_internal references freed memory (when
++ tcp_close returns != ERR_OK)
++
++ 2008-07-08 Frédéric Bernon
++ * stats.h: Fix some build bugs introduced with patch #6483 (missing some parameters
++ in macros, mainly if MEM_STATS=0 and MEMP_STATS=0).
++
++ 2008-06-24 Jonathan Larmour
++ * tcp_in.c: Fix for bug #23693 as suggested by Art R. Ensure cseg is unused
++ if tcp_seg_copy fails.
++
++ 2008-06-17 Simon Goldschmidt
++ * inet_chksum.c: Checked in some ideas of patch #6460 (loop optimizations)
++ and created defines for swapping bytes and folding u32 to u16.
++
++ 2008-05-30 Kieran Mansley
++ * tcp_in.c Remove redundant "if" statement, and use real rcv_wnd
++ rather than rcv_ann_wnd when deciding if packets are in-window.
++ Contributed by <arasmussen@consultant.datasys.swri.edu>
++
++ 2008-05-30 Kieran Mansley
++ * mem.h: Fix BUG#23254. Change macro definition of mem_* to allow
++ passing as function pointers when MEM_LIBC_MALLOC is defined.
++
++ 2008-05-09 Jonathan Larmour
++ * err.h, err.c, sockets.c: Fix bug #23119: Reorder timeout error code to
++ stop it being treated as a fatal error.
++
++ 2008-04-15 Simon Goldschmidt
++ * dhcp.c: fixed bug #22804: dhcp_stop doesn't clear NETIF_FLAG_DHCP
++ (flag now cleared)
++
++ 2008-03-27 Simon Goldschmidt
++ * mem.c, tcpip.c, tcpip.h, opt.h: fixed bug #21433 (Calling mem_free/pbuf_free
++ from interrupt context isn't safe): set LWIP_USE_HEAP_FROM_INTERRUPT to 1
++ in lwipopts.h or use pbuf_free_callback(p)/mem_free_callback(m) to free pbufs
++ or heap memory from interrupt context
++
++ 2008-03-26 Simon Goldschmidt
++ * tcp_in.c, tcp.c: fixed bug #22249: division by zero could occur if a remote
++ host sent a zero mss as TCP option.
++
+
+ (STABLE-1.3.0)
+
+Index: src/api/api_msg.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/api/api_msg.c,v
+retrieving revision 1.102
+retrieving revision 1.104
+diff -u -p -r1.102 -r1.104
+--- a/src/api/api_msg.c 21 Mar 2008 16:23:14 -0000 1.102
++++ b/src/api/api_msg.c 15 Jul 2008 11:18:58 -0000 1.104
+@@ -598,11 +598,16 @@ do_close_internal(struct netconn *conn)
+ LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL));
+
+ /* Set back some callback pointers */
++ tcp_arg(conn->pcb.tcp, NULL);
+ if (conn->pcb.tcp->state == LISTEN) {
+- tcp_arg(conn->pcb.tcp, NULL);
+ tcp_accept(conn->pcb.tcp, NULL);
+ } else {
+ tcp_recv(conn->pcb.tcp, NULL);
++ tcp_accept(conn->pcb.tcp, NULL);
++ /* some callbacks have to be reset if tcp_close is not successful */
++ tcp_sent(conn->pcb.tcp, NULL);
++ tcp_poll(conn->pcb.tcp, NULL, 4);
++ tcp_err(conn->pcb.tcp, NULL);
+ }
+ /* Try to close the connection */
+ err = tcp_close(conn->pcb.tcp);
+@@ -610,11 +615,6 @@ do_close_internal(struct netconn *conn)
+ /* Closing succeeded */
+ conn->state = NETCONN_NONE;
+ /* Set back some callback pointers as conn is going away */
+- tcp_err(conn->pcb.tcp, NULL);
+- tcp_poll(conn->pcb.tcp, NULL, 4);
+- tcp_sent(conn->pcb.tcp, NULL);
+- tcp_recv(conn->pcb.tcp, NULL);
+- tcp_arg(conn->pcb.tcp, NULL);
+ conn->pcb.tcp = NULL;
+ conn->err = ERR_OK;
+ /* Trigger select() in socket layer. This send should something else so the
+@@ -623,6 +623,14 @@ do_close_internal(struct netconn *conn)
+ API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);
+ /* wake up the application task */
+ sys_sem_signal(conn->op_completed);
++ } else {
++ /* Closing failed, restore some of the callbacks */
++ /* Closing of listen pcb will never fail! */
++ LWIP_ASSERT("Closing a listen pcb may not fail!", (conn->pcb.tcp->state != LISTEN));
++ tcp_sent(conn->pcb.tcp, sent_tcp);
++ tcp_poll(conn->pcb.tcp, poll_tcp, 4);
++ tcp_err(conn->pcb.tcp, err_tcp);
++ tcp_arg(conn->pcb.tcp, conn);
+ }
+ /* If closing didn't succeed, we get called again either
+ from poll_tcp or from sent_tcp */
+Index: src/api/err.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/api/err.c,v
+retrieving revision 1.11
+retrieving revision 1.12
+diff -u -p -r1.11 -r1.12
+--- a/src/api/err.c 13 Dec 2007 23:06:50 -0000 1.11
++++ b/src/api/err.c 9 May 2008 12:14:23 -0000 1.12
+@@ -44,17 +44,17 @@ static const char *err_strerr[] = {
+ "Ok.", /* ERR_OK 0 */
+ "Out of memory error.", /* ERR_MEM -1 */
+ "Buffer error.", /* ERR_BUF -2 */
+- "Routing problem.", /* ERR_RTE -3 */
+- "Connection aborted.", /* ERR_ABRT -4 */
+- "Connection reset.", /* ERR_RST -5 */
+- "Connection closed.", /* ERR_CLSD -6 */
+- "Not connected.", /* ERR_CONN -7 */
+- "Illegal value.", /* ERR_VAL -8 */
+- "Illegal argument.", /* ERR_ARG -9 */
+- "Address in use.", /* ERR_USE -10 */
+- "Low-level netif error.", /* ERR_IF -11 */
+- "Already connected.", /* ERR_ISCONN -12 */
+- "Timeout.", /* ERR_TIMEOUT -13 */
++ "Timeout.", /* ERR_TIMEOUT -3 */
++ "Routing problem.", /* ERR_RTE -4 */
++ "Connection aborted.", /* ERR_ABRT -5 */
++ "Connection reset.", /* ERR_RST -6 */
++ "Connection closed.", /* ERR_CLSD -7 */
++ "Not connected.", /* ERR_CONN -8 */
++ "Illegal value.", /* ERR_VAL -9 */
++ "Illegal argument.", /* ERR_ARG -10 */
++ "Address in use.", /* ERR_USE -11 */
++ "Low-level netif error.", /* ERR_IF -12 */
++ "Already connected.", /* ERR_ISCONN -13 */
+ "Operation in progress." /* ERR_INPROGRESS -14 */
+ };
+
+Index: src/api/netdb.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/api/netdb.c,v
+retrieving revision 1.4
+retrieving revision 1.5
+diff -u -p -r1.4 -r1.5
+--- a/src/api/netdb.c 26 Jan 2008 16:11:39 -0000 1.4
++++ b/src/api/netdb.c 16 Jul 2008 20:36:12 -0000 1.5
+@@ -326,7 +326,8 @@ lwip_getaddrinfo(const char *nodename, c
+ if (nodename != NULL) {
+ /* copy nodename to canonname if specified */
+ size_t namelen = strlen(nodename);
+- ai->ai_canonname = mem_malloc(namelen + 1);
++ LWIP_ASSERT("namelen is too long", (namelen + 1) <= (mem_size_t)-1);
++ ai->ai_canonname = mem_malloc((mem_size_t)(namelen + 1));
+ if (ai->ai_canonname == NULL) {
+ goto memerr;
+ }
+Index: src/api/sockets.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/api/sockets.c,v
+retrieving revision 1.116
+retrieving revision 1.117
+diff -u -p -r1.116 -r1.117
+--- a/src/api/sockets.c 13 Mar 2008 20:03:57 -0000 1.116
++++ b/src/api/sockets.c 9 May 2008 12:14:24 -0000 1.117
+@@ -128,17 +128,17 @@ static const int err_to_errno_table[] =
+ 0, /* ERR_OK 0 No error, everything OK. */
+ ENOMEM, /* ERR_MEM -1 Out of memory error. */
+ ENOBUFS, /* ERR_BUF -2 Buffer error. */
+- EHOSTUNREACH, /* ERR_RTE -3 Routing problem. */
+- ECONNABORTED, /* ERR_ABRT -4 Connection aborted. */
+- ECONNRESET, /* ERR_RST -5 Connection reset. */
+- ESHUTDOWN, /* ERR_CLSD -6 Connection closed. */
+- ENOTCONN, /* ERR_CONN -7 Not connected. */
+- EINVAL, /* ERR_VAL -8 Illegal value. */
+- EIO, /* ERR_ARG -9 Illegal argument. */
+- EADDRINUSE, /* ERR_USE -10 Address in use. */
+- -1, /* ERR_IF -11 Low-level netif error */
+- -1, /* ERR_ISCONN -12 Already connected. */
+- ETIMEDOUT, /* ERR_TIMEOUT -13 Timeout */
++ ETIMEDOUT, /* ERR_TIMEOUT -3 Timeout */
++ EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */
++ ECONNABORTED, /* ERR_ABRT -5 Connection aborted. */
++ ECONNRESET, /* ERR_RST -6 Connection reset. */
++ ESHUTDOWN, /* ERR_CLSD -7 Connection closed. */
++ ENOTCONN, /* ERR_CONN -8 Not connected. */
++ EINVAL, /* ERR_VAL -9 Illegal value. */
++ EIO, /* ERR_ARG -10 Illegal argument. */
++ EADDRINUSE, /* ERR_USE -11 Address in use. */
++ -1, /* ERR_IF -12 Low-level netif error */
++ -1, /* ERR_ISCONN -13 Already connected. */
+ EINPROGRESS /* ERR_INPROGRESS -14 Operation in progress */
+ };
+
+Index: src/api/tcpip.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/api/tcpip.c,v
+retrieving revision 1.70
+retrieving revision 1.73
+diff -u -p -r1.70 -r1.73
+--- a/src/api/tcpip.c 12 Jan 2008 11:52:22 -0000 1.70
++++ b/src/api/tcpip.c 27 Jun 2008 20:34:51 -0000 1.73
+@@ -518,4 +518,42 @@ tcpip_init(void (* initfunc)(void *), vo
+ sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO);
+ }
+
++/**
++ * Simple callback function used with tcpip_callback to free a pbuf
++ * (pbuf_free has a wrong signature for tcpip_callback)
++ *
++ * @param p The pbuf (chain) to be dereferenced.
++ */
++static void
++pbuf_free_int(void *p)
++{
++ struct pbuf *q = p;
++ pbuf_free(q);
++}
++
++/**
++ * A simple wrapper function that allows you to free a pbuf from interrupt context.
++ *
++ * @param p The pbuf (chain) to be dereferenced.
++ * @return ERR_OK if callback could be enqueued, an err_t if not
++ */
++err_t
++pbuf_free_callback(struct pbuf *p)
++{
++ return tcpip_callback_with_block(pbuf_free_int, p, 0);
++}
++
++/**
++ * A simple wrapper function that allows you to free heap memory from
++ * interrupt context.
++ *
++ * @param m the heap memory to free
++ * @return ERR_OK if callback could be enqueued, an err_t if not
++ */
++err_t
++mem_free_callback(void *m)
++{
++ return tcpip_callback_with_block(mem_free, m, 0);
++}
++
+ #endif /* !NO_SYS */
+Index: src/core/dhcp.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/dhcp.c,v
+retrieving revision 1.86
+retrieving revision 1.87
+diff -u -p -r1.86 -r1.87
+--- a/src/core/dhcp.c 4 Mar 2008 14:25:58 -0000 1.86
++++ b/src/core/dhcp.c 15 Apr 2008 17:24:55 -0000 1.87
+@@ -568,6 +568,8 @@ dhcp_start(struct netif *netif)
+ LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;);
+ dhcp = netif->dhcp;
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
++ /* Remove the flag that says this netif is handled by DHCP,
++ it is set when we succeeded starting. */
+ netif->flags &= ~NETIF_FLAG_DHCP;
+
+ /* no DHCP client attached yet? */
+@@ -609,6 +611,7 @@ dhcp_start(struct netif *netif)
+ dhcp_stop(netif);
+ return ERR_MEM;
+ }
++ /* Set the flag that says this netif is handled by DHCP. */
+ netif->flags |= NETIF_FLAG_DHCP;
+ return result;
+ }
+@@ -1063,6 +1066,8 @@ dhcp_stop(struct netif *netif)
+ {
+ struct dhcp *dhcp = netif->dhcp;
+ LWIP_ERROR("dhcp_stop: netif != NULL", (netif != NULL), return;);
++ /* Remove the flag that says this netif is handled by DHCP. */
++ netif->flags &= ~NETIF_FLAG_DHCP;
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | 3, ("dhcp_stop()\n"));
+ /* netif is DHCP configured? */
+Index: src/core/mem.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/mem.c,v
+retrieving revision 1.59
+retrieving revision 1.62
+diff -u -p -r1.59 -r1.62
+--- a/src/core/mem.c 4 Mar 2008 16:31:32 -0000 1.59
++++ b/src/core/mem.c 30 Jun 2008 18:16:51 -0000 1.62
+@@ -177,9 +177,36 @@ static u8_t *ram;
+ static struct mem *ram_end;
+ /** pointer to the lowest free block, this is used for faster search */
+ static struct mem *lfree;
++
+ /** concurrent access protection */
+ static sys_sem_t mem_sem;
+
++#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++
++static volatile u8_t mem_free_count;
++
++/* Allow mem_free from other (e.g. interrupt) context */
++#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free)
++#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free)
++#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free)
++#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
++#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc)
++#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc)
++
++#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
++
++/* Protect the heap only by using a semaphore */
++#define LWIP_MEM_FREE_DECL_PROTECT()
++#define LWIP_MEM_FREE_PROTECT() sys_arch_sem_wait(mem_sem, 0)
++#define LWIP_MEM_FREE_UNPROTECT() sys_sem_signal(mem_sem)
++/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
++#define LWIP_MEM_ALLOC_DECL_PROTECT()
++#define LWIP_MEM_ALLOC_PROTECT()
++#define LWIP_MEM_ALLOC_UNPROTECT()
++
++#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
++
++
+ /**
+ * "Plug holes" by combining adjacent empty struct mems.
+ * After this function is through, there should not exist
+@@ -255,9 +282,7 @@ mem_init(void)
+ /* initialize the lowest-free pointer to the start of the heap */
+ lfree = (struct mem *)ram;
+
+-#if MEM_STATS
+- lwip_stats.mem.avail = MEM_SIZE_ALIGNED;
+-#endif /* MEM_STATS */
++ MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
+ }
+
+ /**
+@@ -270,6 +295,7 @@ void
+ mem_free(void *rmem)
+ {
+ struct mem *mem;
++ LWIP_MEM_FREE_DECL_PROTECT();
+
+ if (rmem == NULL) {
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
+@@ -277,20 +303,20 @@ mem_free(void *rmem)
+ }
+ LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);
+
+- /* protect the heap from concurrent access */
+- sys_arch_sem_wait(mem_sem, 0);
+-
+ LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
+ (u8_t *)rmem < (u8_t *)ram_end);
+
+ if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
++ SYS_ARCH_DECL_PROTECT(lev);
+ LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
+-#if MEM_STATS
+- ++lwip_stats.mem.err;
+-#endif /* MEM_STATS */
+- sys_sem_signal(mem_sem);
++ /* protect mem stats from concurrent access */
++ SYS_ARCH_PROTECT(lev);
++ MEM_STATS_INC(illegal);
++ SYS_ARCH_UNPROTECT(lev);
+ return;
+ }
++ /* protect the heap from concurrent access */
++ LWIP_MEM_FREE_PROTECT();
+ /* Get the corresponding struct mem ... */
+ mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
+ /* ... which has to be in a used state ... */
+@@ -303,13 +329,14 @@ mem_free(void *rmem)
+ lfree = mem;
+ }
+
+-#if MEM_STATS
+- lwip_stats.mem.used -= mem->next - ((u8_t *)mem - ram);
+-#endif /* MEM_STATS */
++ MEM_STATS_DEC_USED(used, mem->next - ((u8_t *)mem - ram));
+
+ /* finally, see if prev or next are free also */
+ plug_holes(mem);
+- sys_sem_signal(mem_sem);
++#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++ mem_free_count = 1;
++#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
++ LWIP_MEM_FREE_UNPROTECT();
+ }
+
+ /**
+@@ -321,6 +348,8 @@ mem_free(void *rmem)
+ * @param newsize required size after shrinking (needs to be smaller than or
+ * equal to the previous size)
+ * @return for compatibility reasons: is always == rmem, at the moment
++ * or NULL if newsize is > old size, in which case rmem is NOT touched
++ * or freed!
+ */
+ void *
+ mem_realloc(void *rmem, mem_size_t newsize)
+@@ -328,6 +357,8 @@ mem_realloc(void *rmem, mem_size_t newsi
+ mem_size_t size;
+ mem_size_t ptr, ptr2;
+ struct mem *mem, *mem2;
++ /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
++ LWIP_MEM_FREE_DECL_PROTECT();
+
+ /* Expand the size of the allocated memory region so that we can
+ adjust for alignment. */
+@@ -346,7 +377,12 @@ mem_realloc(void *rmem, mem_size_t newsi
+ (u8_t *)rmem < (u8_t *)ram_end);
+
+ if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
++ SYS_ARCH_DECL_PROTECT(lev);
+ LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
++ /* protect mem stats from concurrent access */
++ SYS_ARCH_PROTECT(lev);
++ MEM_STATS_INC(illegal);
++ SYS_ARCH_UNPROTECT(lev);
+ return rmem;
+ }
+ /* Get the corresponding struct mem ... */
+@@ -366,11 +402,9 @@ mem_realloc(void *rmem, mem_size_t newsi
+ }
+
+ /* protect the heap from concurrent access */
+- sys_arch_sem_wait(mem_sem, 0);
++ LWIP_MEM_FREE_PROTECT();
+
+-#if MEM_STATS
+- lwip_stats.mem.used -= (size - newsize);
+-#endif /* MEM_STATS */
++ MEM_STATS_DEC_USED(used, (size - newsize));
+
+ mem2 = (struct mem *)&ram[mem->next];
+ if(mem2->used == 0) {
+@@ -426,7 +460,10 @@ mem_realloc(void *rmem, mem_size_t newsi
+ -> don't do anyhting.
+ -> the remaining space stays unused since it is too small
+ } */
+- sys_sem_signal(mem_sem);
++#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++ mem_free_count = 1;
++#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
++ LWIP_MEM_FREE_UNPROTECT();
+ return rmem;
+ }
+
+@@ -444,6 +481,10 @@ mem_malloc(mem_size_t size)
+ {
+ mem_size_t ptr, ptr2;
+ struct mem *mem, *mem2;
++#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++ u8_t local_mem_free_count = 0;
++#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
++ LWIP_MEM_ALLOC_DECL_PROTECT();
+
+ if (size == 0) {
+ return NULL;
+@@ -464,88 +505,101 @@ mem_malloc(mem_size_t size)
+
+ /* protect the heap from concurrent access */
+ sys_arch_sem_wait(mem_sem, 0);
++ LWIP_MEM_ALLOC_PROTECT();
++#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++ /* run as long as a mem_free disturbed mem_malloc */
++ do {
++ local_mem_free_count = 0;
++#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
++
++ /* Scan through the heap searching for a free block that is big enough,
++ * beginning with the lowest free block.
++ */
++ for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
++ ptr = ((struct mem *)&ram[ptr])->next) {
++ mem = (struct mem *)&ram[ptr];
++#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++ mem_free_count = 0;
++ LWIP_MEM_ALLOC_UNPROTECT();
++ /* allow mem_free to run */
++ LWIP_MEM_ALLOC_PROTECT();
++ if (mem_free_count != 0) {
++ local_mem_free_count = mem_free_count;
++ }
++ mem_free_count = 0;
++#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+- /* Scan through the heap searching for a free block that is big enough,
+- * beginning with the lowest free block.
+- */
+- for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
+- ptr = ((struct mem *)&ram[ptr])->next) {
+- mem = (struct mem *)&ram[ptr];
+-
+- if ((!mem->used) &&
+- (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
+- /* mem is not used and at least perfect fit is possible:
+- * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
+-
+- if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
+- /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
+- * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
+- * -> split large block, create empty remainder,
+- * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
+- * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
+- * struct mem would fit in but no data between mem2 and mem2->next
+- * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
+- * region that couldn't hold data, but when mem->next gets freed,
+- * the 2 regions would be combined, resulting in more free memory
+- */
+- ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
+- /* create mem2 struct */
+- mem2 = (struct mem *)&ram[ptr2];
+- mem2->used = 0;
+- mem2->next = mem->next;
+- mem2->prev = ptr;
+- /* and insert it between mem and mem->next */
+- mem->next = ptr2;
+- mem->used = 1;
+-
+- if (mem2->next != MEM_SIZE_ALIGNED) {
+- ((struct mem *)&ram[mem2->next])->prev = ptr2;
+- }
+-#if MEM_STATS
+- lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
+- if (lwip_stats.mem.max < lwip_stats.mem.used) {
+- lwip_stats.mem.max = lwip_stats.mem.used;
++ if ((!mem->used) &&
++ (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
++ /* mem is not used and at least perfect fit is possible:
++ * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
++
++ if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
++ /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
++ * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
++ * -> split large block, create empty remainder,
++ * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
++ * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
++ * struct mem would fit in but no data between mem2 and mem2->next
++ * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
++ * region that couldn't hold data, but when mem->next gets freed,
++ * the 2 regions would be combined, resulting in more free memory
++ */
++ ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
++ /* create mem2 struct */
++ mem2 = (struct mem *)&ram[ptr2];
++ mem2->used = 0;
++ mem2->next = mem->next;
++ mem2->prev = ptr;
++ /* and insert it between mem and mem->next */
++ mem->next = ptr2;
++ mem->used = 1;
++
++ if (mem2->next != MEM_SIZE_ALIGNED) {
++ ((struct mem *)&ram[mem2->next])->prev = ptr2;
++ }
++ MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
++ } else {
++ /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
++ * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
++ * take care of this).
++ * -> near fit or excact fit: do not split, no mem2 creation
++ * also can't move mem->next directly behind mem, since mem->next
++ * will always be used at this point!
++ */
++ mem->used = 1;
++ MEM_STATS_INC_USED(used, mem->next - ((u8_t *)mem - ram));
+ }
+-#endif /* MEM_STATS */
+- } else {
+- /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
+- * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
+- * take care of this).
+- * -> near fit or excact fit: do not split, no mem2 creation
+- * also can't move mem->next directly behind mem, since mem->next
+- * will always be used at this point!
+- */
+- mem->used = 1;
+-#if MEM_STATS
+- lwip_stats.mem.used += mem->next - ((u8_t *)mem - ram);
+- if (lwip_stats.mem.max < lwip_stats.mem.used) {
+- lwip_stats.mem.max = lwip_stats.mem.used;
+- }
+-#endif /* MEM_STATS */
+- }
+
+- if (mem == lfree) {
+- /* Find next free block after mem and update lowest free pointer */
+- while (lfree->used && lfree != ram_end) {
+- lfree = (struct mem *)&ram[lfree->next];
++ if (mem == lfree) {
++ /* Find next free block after mem and update lowest free pointer */
++ while (lfree->used && lfree != ram_end) {
++ LWIP_MEM_ALLOC_UNPROTECT();
++ /* prevent high interrupt latency... */
++ LWIP_MEM_ALLOC_PROTECT();
++ lfree = (struct mem *)&ram[lfree->next];
++ }
++ LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
+ }
+- LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
+- }
+- sys_sem_signal(mem_sem);
+- LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
+- (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
+- LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
+- (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
+- LWIP_ASSERT("mem_malloc: sanity check alignment",
+- (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
++ LWIP_MEM_ALLOC_UNPROTECT();
++ sys_sem_signal(mem_sem);
++ LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
++ (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
++ LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
++ (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
++ LWIP_ASSERT("mem_malloc: sanity check alignment",
++ (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
+
+- return (u8_t *)mem + SIZEOF_STRUCT_MEM;
++ return (u8_t *)mem + SIZEOF_STRUCT_MEM;
++ }
+ }
+- }
++#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++ /* if we got interrupted by a mem_free, try again */
++ } while(local_mem_free_count != 0);
++#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+ LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
+-#if MEM_STATS
+- ++lwip_stats.mem.err;
+-#endif /* MEM_STATS */
++ MEM_STATS_INC(err);
++ LWIP_MEM_ALLOC_UNPROTECT();
+ sys_sem_signal(mem_sem);
+ return NULL;
+ }
+Index: src/core/memp.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/memp.c,v
+retrieving revision 1.55
+retrieving revision 1.56
+diff -u -p -r1.55 -r1.56
+--- a/src/core/memp.c 25 Nov 2007 10:43:28 -0000 1.55
++++ b/src/core/memp.c 27 Jun 2008 18:37:54 -0000 1.56
+@@ -252,13 +252,12 @@ memp_init(void)
+ struct memp *memp;
+ u16_t i, j;
+
+-#if MEMP_STATS
+ for (i = 0; i < MEMP_MAX; ++i) {
+- lwip_stats.memp[i].used = lwip_stats.memp[i].max =
+- lwip_stats.memp[i].err = 0;
+- lwip_stats.memp[i].avail = memp_num[i];
++ MEMP_STATS_AVAIL(used, i, 0);
++ MEMP_STATS_AVAIL(max, i, 0);
++ MEMP_STATS_AVAIL(err, i, 0);
++ MEMP_STATS_AVAIL(avail, i, memp_num[i]);
+ }
+-#endif /* MEMP_STATS */
+
+ memp = LWIP_MEM_ALIGN(memp_memory);
+ /* for every pool: */
+@@ -315,20 +314,13 @@ memp_malloc_fn(memp_t type, const char*
+ memp->file = file;
+ memp->line = line;
+ #endif /* MEMP_OVERFLOW_CHECK */
+-#if MEMP_STATS
+- ++lwip_stats.memp[type].used;
+- if (lwip_stats.memp[type].used > lwip_stats.memp[type].max) {
+- lwip_stats.memp[type].max = lwip_stats.memp[type].used;
+- }
+-#endif /* MEMP_STATS */
++ MEMP_STATS_INC_USED(used, type);
+ LWIP_ASSERT("memp_malloc: memp properly aligned",
+ ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0);
+ memp = (struct memp*)((u8_t*)memp + MEMP_SIZE);
+ } else {
+ LWIP_DEBUGF(MEMP_DEBUG | 2, ("memp_malloc: out of memory in pool %s\n", memp_desc[type]));
+-#if MEMP_STATS
+- ++lwip_stats.memp[type].err;
+-#endif /* MEMP_STATS */
++ MEMP_STATS_INC(err, type);
+ }
+
+ SYS_ARCH_UNPROTECT(old_level);
+@@ -365,9 +357,7 @@ memp_free(memp_t type, void *mem)
+ #endif /* MEMP_OVERFLOW_CHECK >= 2 */
+ #endif /* MEMP_OVERFLOW_CHECK */
+
+-#if MEMP_STATS
+- lwip_stats.memp[type].used--;
+-#endif /* MEMP_STATS */
++ MEMP_STATS_DEC(used, type);
+
+ memp->next = memp_tab[type];
+ memp_tab[type] = memp;
+Index: src/core/netif.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/netif.c,v
+retrieving revision 1.65
+retrieving revision 1.68
+diff -u -p -r1.65 -r1.68
+--- a/src/core/netif.c 9 Oct 2007 20:00:55 -0000 1.65
++++ b/src/core/netif.c 19 Jun 2008 16:27:18 -0000 1.68
+@@ -45,6 +45,12 @@
+ #include "lwip/snmp.h"
+ #include "lwip/igmp.h"
+ #include "netif/etharp.h"
++#if ENABLE_LOOPBACK
++#include "lwip/sys.h"
++#if LWIP_NETIF_LOOPBACK_MULTITHREADING
++#include "lwip/tcpip.h"
++#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */
++#endif /* ENABLE_LOOPBACK */
+
+ #if LWIP_NETIF_STATUS_CALLBACK
+ #define NETIF_STATUS_CALLBACK(n) { if (n->status_callback) (n->status_callback)(n); }
+@@ -106,6 +112,10 @@ netif_add(struct netif *netif, struct ip
+ #if LWIP_IGMP
+ netif->igmp_mac_filter = NULL;
+ #endif /* LWIP_IGMP */
++#if ENABLE_LOOPBACK
++ netif->loop_first = NULL;
++ netif->loop_last = NULL;
++#endif /* ENABLE_LOOPBACK */
+
+ /* remember netif specific state information data */
+ netif->state = state;
+@@ -114,6 +124,9 @@ netif_add(struct netif *netif, struct ip
+ #if LWIP_NETIF_HWADDRHINT
+ netif->addr_hint = NULL;
+ #endif /* LWIP_NETIF_HWADDRHINT*/
++#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS
++ netif->loop_cnt_current = 0;
++#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */
+
+ netif_set_addr(netif, ipaddr, netmask, gw);
+
+@@ -493,7 +506,158 @@ u8_t netif_is_link_up(struct netif *neti
+ */
+ void netif_set_link_callback(struct netif *netif, void (* link_callback)(struct netif *netif ))
+ {
+- if ( netif )
+- netif->link_callback = link_callback;
++ if (netif) {
++ netif->link_callback = link_callback;
++ }
+ }
+ #endif /* LWIP_NETIF_LINK_CALLBACK */
++
++#if ENABLE_LOOPBACK
++/**
++ * Send an IP packet to be received on the same netif (loopif-like).
++ * The pbuf is simply copied and handed back to netif->input.
++ * In multithreaded mode, this is done directly since netif->input must put
++ * the packet on a queue.
++ * In callback mode, the packet is put on an internal queue and is fed to
++ * netif->input by netif_poll().
++ *
++ * @param netif the lwip network interface structure
++ * @param p the (IP) packet to 'send'
++ * @param ipaddr the ip address to send the packet to (not used)
++ * @return ERR_OK if the packet has been sent
++ * ERR_MEM if the pbuf used to copy the packet couldn't be allocated
++ */
++err_t
++netif_loop_output(struct netif *netif, struct pbuf *p,
++ struct ip_addr *ipaddr)
++{
++ struct pbuf *r;
++ err_t err;
++ struct pbuf *last;
++#if LWIP_LOOPBACK_MAX_PBUFS
++ u8_t clen = 0;
++#endif /* LWIP_LOOPBACK_MAX_PBUFS */
++ SYS_ARCH_DECL_PROTECT(lev);
++ LWIP_UNUSED_ARG(ipaddr);
++
++ /* Allocate a new pbuf */
++ r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
++ if (r == NULL) {
++ return ERR_MEM;
++ }
++#if LWIP_LOOPBACK_MAX_PBUFS
++ clen = pbuf_clen(r);
++ /* check for overflow or too many pbuf on queue */
++ if(((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
++ ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) {
++ pbuf_free(r);
++ r = NULL;
++ return ERR_MEM;
++ }
++ netif->loop_cnt_current += clen;
++#endif /* LWIP_LOOPBACK_MAX_PBUFS */
++
++ /* Copy the whole pbuf queue p into the single pbuf r */
++ if ((err = pbuf_copy(r, p)) != ERR_OK) {
++ pbuf_free(r);
++ r = NULL;
++ return err;
++ }
++
++ /* Put the packet on a linked list which gets emptied through calling
++ netif_poll(). */
++
++ /* let last point to the last pbuf in chain r */
++ for (last = r; last->next != NULL; last = last->next);
++
++ SYS_ARCH_PROTECT(lev);
++ if(netif->loop_first != NULL) {
++ LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
++ netif->loop_last->next = r;
++ netif->loop_last = last;
++ } else {
++ netif->loop_first = r;
++ netif->loop_last = last;
++ }
++ SYS_ARCH_UNPROTECT(lev);
++
++#if LWIP_NETIF_LOOPBACK_MULTITHREADING
++ /* For multithreading environment, schedule a call to netif_poll */
++ tcpip_callback(netif_poll, netif);
++#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */
++
++ return ERR_OK;
++}
++
++/**
++ * Call netif_poll() in the main loop of your application. This is to prevent
++ * reentering non-reentrant functions like tcp_input(). Packets passed to
++ * netif_loop_output() are put on a list that is passed to netif->input() by
++ * netif_poll().
++ */
++void
++netif_poll(struct netif *netif)
++{
++ struct pbuf *in;
++ SYS_ARCH_DECL_PROTECT(lev);
++
++ do {
++ /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */
++ SYS_ARCH_PROTECT(lev);
++ in = netif->loop_first;
++ if(in != NULL) {
++ struct pbuf *in_end = in;
++#if LWIP_LOOPBACK_MAX_PBUFS
++ u8_t clen = pbuf_clen(in);
++ /* adjust the number of pbufs on queue */
++ LWIP_ASSERT("netif->loop_cnt_current underflow",
++ ((netif->loop_cnt_current - clen) < netif->loop_cnt_current));
++ netif->loop_cnt_current -= clen;
++#endif /* LWIP_LOOPBACK_MAX_PBUFS */
++ while(in_end->len != in_end->tot_len) {
++ LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL);
++ in_end = in_end->next;
++ }
++ /* 'in_end' now points to the last pbuf from 'in' */
++ if(in_end == netif->loop_last) {
++ /* this was the last pbuf in the list */
++ netif->loop_first = netif->loop_last = NULL;
++ } else {
++ /* pop the pbuf off the list */
++ netif->loop_first = in_end->next;
++ LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL);
++ }
++ /* De-queue the pbuf from its successors on the 'loop_' list. */
++ in_end->next = NULL;
++ }
++ SYS_ARCH_UNPROTECT(lev);
++
++ if(in != NULL) {
++ /* loopback packets are always IP packets! */
++ if(ip_input(in, netif) != ERR_OK) {
++ pbuf_free(in);
++ }
++ /* Don't reference the packet any more! */
++ in = NULL;
++ }
++ /* go on while there is a packet on the list */
++ } while(netif->loop_first != NULL);
++}
++
++#if !LWIP_NETIF_LOOPBACK_MULTITHREADING
++/**
++ * Calls netif_poll() for every netif on the netif_list.
++ */
++void
++netif_poll_all(void)
++{
++ struct netif *netif = netif_list;
++ /* loop through netifs */
++ while (netif != NULL) {
++ netif_poll(netif);
++ /* proceed to next network interface */
++ netif = netif->next;
++ }
++}
++#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */
++#endif /* ENABLE_LOOPBACK */
+Index: src/core/pbuf.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/pbuf.c,v
+retrieving revision 1.127
+retrieving revision 1.128
+diff -u -p -r1.127 -r1.128
+--- a/src/core/pbuf.c 4 Mar 2008 16:37:46 -0000 1.127
++++ b/src/core/pbuf.c 1 Apr 2008 19:05:40 -0000 1.128
+@@ -667,8 +667,8 @@ pbuf_dechain(struct pbuf *p)
+ *
+ * @note Only one packet is copied, no packet queue!
+ *
+- * @param p_to pbuf source of the copy
+- * @param p_from pbuf destination of the copy
++ * @param p_to pbuf destination of the copy
++ * @param p_from pbuf source of the copy
+ *
+ * @return ERR_OK if pbuf was copied
+ * ERR_ARG if one of the pbufs is NULL or p_to is not big
+Index: src/core/stats.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/stats.c,v
+retrieving revision 1.27
+retrieving revision 1.28
+diff -u -p -r1.27 -r1.28
+--- a/src/core/stats.c 4 Mar 2008 16:31:32 -0000 1.27
++++ b/src/core/stats.c 27 Jun 2008 18:37:54 -0000 1.28
+@@ -54,7 +54,6 @@ stats_display_proto(struct stats_proto *
+ {
+ LWIP_PLATFORM_DIAG(("\n%s\n\t", name));
+ LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit));
+- LWIP_PLATFORM_DIAG(("rexmit: %"STAT_COUNTER_F"\n\t", proto->rexmit));
+ LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv));
+ LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw));
+ LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop));
+@@ -68,6 +67,7 @@ stats_display_proto(struct stats_proto *
+ LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit));
+ }
+
++#if IGMP_STATS
+ void
+ stats_display_igmp(struct stats_igmp *igmp)
+ {
+@@ -82,7 +82,9 @@ stats_display_igmp(struct stats_igmp *ig
+ LWIP_PLATFORM_DIAG(("report_rxed: %"STAT_COUNTER_F"\n\t", igmp->report_rxed));
+ LWIP_PLATFORM_DIAG(("group_query_rxed: %"STAT_COUNTER_F"\n", igmp->group_query_rxed));
+ }
++#endif /* IGMP_STATS */
+
++#if MEM_STATS || MEMP_STATS
+ void
+ stats_display_mem(struct stats_mem *mem, char *name)
+ {
+@@ -93,48 +95,53 @@ stats_display_mem(struct stats_mem *mem,
+ LWIP_PLATFORM_DIAG(("err: %"U32_F"\n", (u32_t)mem->err));
+ }
+
++#if MEMP_STATS
+ void
+-stats_display(void)
++stats_display_memp(struct stats_mem *mem, int index)
+ {
+-#if MEMP_STATS
+- s16_t i;
+ char * memp_names[] = {
+ #define LWIP_MEMPOOL(name,num,size,desc) desc,
+ #include "lwip/memp_std.h"
+ };
+-#endif
+-#if LINK_STATS
+- stats_display_proto(&lwip_stats.link, "LINK");
+-#endif
+-#if ETHARP_STATS
+- stats_display_proto(&lwip_stats.etharp, "ETHARP");
+-#endif
+-#if IPFRAG_STATS
+- stats_display_proto(&lwip_stats.ip_frag, "IP_FRAG");
+-#endif
+-#if IP_STATS
+- stats_display_proto(&lwip_stats.ip, "IP");
+-#endif
+-#if ICMP_STATS
+- stats_display_proto(&lwip_stats.icmp, "ICMP");
+-#endif
+-#if IGMP_STATS
+- stats_display_igmp(&lwip_stats.igmp);
+-#endif
+-#if UDP_STATS
+- stats_display_proto(&lwip_stats.udp, "UDP");
+-#endif
+-#if TCP_STATS
+- stats_display_proto(&lwip_stats.tcp, "TCP");
+-#endif
+-#if MEM_STATS
+- stats_display_mem(&lwip_stats.mem, "HEAP");
+-#endif
+-#if MEMP_STATS
++ if(index < MEMP_MAX) {
++ stats_display_mem(mem, memp_names[index]);
++ }
++}
++#endif /* MEMP_STATS */
++#endif /* MEM_STATS || MEMP_STATS */
++
++#if SYS_STATS
++void
++stats_display_sys(struct stats_sys *sys)
++{
++ LWIP_PLATFORM_DIAG(("\nSYS\n\t"));
++ LWIP_PLATFORM_DIAG(("sem.used: %"U32_F"\n\t", (u32_t)sys->sem.used));
++ LWIP_PLATFORM_DIAG(("sem.max: %"U32_F"\n\t", (u32_t)sys->sem.max));
++ LWIP_PLATFORM_DIAG(("sem.err: %"U32_F"\n\t", (u32_t)sys->sem.err));
++ LWIP_PLATFORM_DIAG(("mbox.used: %"U32_F"\n\t", (u32_t)sys->mbox.used));
++ LWIP_PLATFORM_DIAG(("mbox.max: %"U32_F"\n\t", (u32_t)sys->mbox.max));
++ LWIP_PLATFORM_DIAG(("mbox.err: %"U32_F"\n\t", (u32_t)sys->mbox.err));
++}
++#endif /* SYS_STATS */
++
++void
++stats_display(void)
++{
++ s16_t i;
++
++ LINK_STATS_DISPLAY();
++ ETHARP_STATS_DISPLAY();
++ IPFRAG_STATS_DISPLAY();
++ IP_STATS_DISPLAY();
++ IGMP_STATS_DISPLAY();
++ ICMP_STATS_DISPLAY();
++ UDP_STATS_DISPLAY();
++ TCP_STATS_DISPLAY();
++ MEM_STATS_DISPLAY();
+ for (i = 0; i < MEMP_MAX; i++) {
+- stats_display_mem(&lwip_stats.memp[i], memp_names[i]);
++ MEMP_STATS_DISPLAY(i);
+ }
+-#endif
++ SYS_STATS_DISPLAY();
+ }
+ #endif /* LWIP_STATS_DISPLAY */
+
+Index: src/core/sys.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/sys.c,v
+retrieving revision 1.32
+retrieving revision 1.33
+diff -u -p -r1.32 -r1.33
+--- a/src/core/sys.c 25 Nov 2007 13:57:05 -0000 1.32
++++ b/src/core/sys.c 16 Jul 2008 20:36:12 -0000 1.33
+@@ -65,7 +65,7 @@ struct sswt_cb
+ void
+ sys_mbox_fetch(sys_mbox_t mbox, void **msg)
+ {
+- u32_t time;
++ u32_t time_needed;
+ struct sys_timeouts *timeouts;
+ struct sys_timeo *tmptimeout;
+ sys_timeout_handler h;
+@@ -76,18 +76,18 @@ sys_mbox_fetch(sys_mbox_t mbox, void **m
+
+ if (!timeouts || !timeouts->next) {
+ UNLOCK_TCPIP_CORE();
+- time = sys_arch_mbox_fetch(mbox, msg, 0);
++ time_needed = sys_arch_mbox_fetch(mbox, msg, 0);
+ LOCK_TCPIP_CORE();
+ } else {
+ if (timeouts->next->time > 0) {
+ UNLOCK_TCPIP_CORE();
+- time = sys_arch_mbox_fetch(mbox, msg, timeouts->next->time);
++ time_needed = sys_arch_mbox_fetch(mbox, msg, timeouts->next->time);
+ LOCK_TCPIP_CORE();
+ } else {
+- time = SYS_ARCH_TIMEOUT;
++ time_needed = SYS_ARCH_TIMEOUT;
+ }
+
+- if (time == SYS_ARCH_TIMEOUT) {
++ if (time_needed == SYS_ARCH_TIMEOUT) {
+      /* If time == SYS_ARCH_TIMEOUT, a timeout occurred before a message
+ could be fetched. We should now call the timeout handler and
+ deallocate the memory allocated for the timeout. */
+@@ -107,8 +107,8 @@ sys_mbox_fetch(sys_mbox_t mbox, void **m
+ /* If time != SYS_ARCH_TIMEOUT, a message was received before the timeout
+       occurred. The time variable is set to the number of
+ milliseconds we waited for the message. */
+- if (time < timeouts->next->time) {
+- timeouts->next->time -= time;
++ if (time_needed < timeouts->next->time) {
++ timeouts->next->time -= time_needed;
+ } else {
+ timeouts->next->time = 0;
+ }
+@@ -125,7 +125,7 @@ sys_mbox_fetch(sys_mbox_t mbox, void **m
+ void
+ sys_sem_wait(sys_sem_t sem)
+ {
+- u32_t time;
++ u32_t time_needed;
+ struct sys_timeouts *timeouts;
+ struct sys_timeo *tmptimeout;
+ sys_timeout_handler h;
+@@ -139,12 +139,12 @@ sys_sem_wait(sys_sem_t sem)
+ sys_arch_sem_wait(sem, 0);
+ } else {
+ if (timeouts->next->time > 0) {
+- time = sys_arch_sem_wait(sem, timeouts->next->time);
++ time_needed = sys_arch_sem_wait(sem, timeouts->next->time);
+ } else {
+- time = SYS_ARCH_TIMEOUT;
++ time_needed = SYS_ARCH_TIMEOUT;
+ }
+
+- if (time == SYS_ARCH_TIMEOUT) {
++ if (time_needed == SYS_ARCH_TIMEOUT) {
+      /* If time == SYS_ARCH_TIMEOUT, a timeout occurred before a message
+ could be fetched. We should now call the timeout handler and
+ deallocate the memory allocated for the timeout. */
+@@ -164,8 +164,8 @@ sys_sem_wait(sys_sem_t sem)
+ /* If time != SYS_ARCH_TIMEOUT, a message was received before the timeout
+       occurred. The time variable is set to the number of
+ milliseconds we waited for the message. */
+- if (time < timeouts->next->time) {
+- timeouts->next->time -= time;
++ if (time_needed < timeouts->next->time) {
++ timeouts->next->time -= time_needed;
+ } else {
+ timeouts->next->time = 0;
+ }
+Index: src/core/tcp.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/tcp.c,v
+retrieving revision 1.85
+retrieving revision 1.86
+diff -u -p -r1.85 -r1.86
+--- a/src/core/tcp.c 22 Jan 2008 21:15:15 -0000 1.85
++++ b/src/core/tcp.c 26 Mar 2008 11:57:13 -0000 1.86
+@@ -509,7 +509,8 @@ tcp_connect(struct tcp_pcb *pcb, struct
+ pcb->rcv_wnd = TCP_WND;
+ pcb->rcv_ann_wnd = TCP_WND;
+ pcb->snd_wnd = TCP_WND;
+- /* The send MSS is updated when an MSS option is received. */
++ /* As initial send MSS, we use TCP_MSS but limit it to 536.
++ The send MSS is updated when an MSS option is received. */
+ pcb->mss = (TCP_MSS > 536) ? 536 : TCP_MSS;
+ #if TCP_CALCULATE_EFF_SEND_MSS
+ pcb->mss = tcp_eff_send_mss(pcb->mss, ipaddr);
+@@ -991,7 +992,8 @@ tcp_alloc(u8_t prio)
+ pcb->rcv_ann_wnd = TCP_WND;
+ pcb->tos = 0;
+ pcb->ttl = TCP_TTL;
+- /* The send MSS is updated when an MSS option is received. */
++ /* As initial send MSS, we use TCP_MSS but limit it to 536.
++ The send MSS is updated when an MSS option is received. */
+ pcb->mss = (TCP_MSS > 536) ? 536 : TCP_MSS;
+ pcb->rto = 3000 / TCP_SLOW_INTERVAL;
+ pcb->sa = 0;
+Index: src/core/tcp_in.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/tcp_in.c,v
+retrieving revision 1.97
+retrieving revision 1.100
+diff -u -p -r1.97 -r1.100
+--- a/src/core/tcp_in.c 22 Jan 2008 21:15:15 -0000 1.97
++++ b/src/core/tcp_in.c 24 Jun 2008 15:46:39 -0000 1.100
+@@ -511,7 +511,7 @@ tcp_process(struct tcp_pcb *pcb)
+ }
+ } else {
+ if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt,
+- pcb->rcv_nxt+pcb->rcv_ann_wnd)) {
++ pcb->rcv_nxt+pcb->rcv_wnd)) {
+ acceptable = 1;
+ }
+ }
+@@ -1038,7 +1038,7 @@ tcp_receive(struct tcp_pcb *pcb)
+ and below rcv_nxt + rcv_wnd) in order to be further
+ processed. */
+ if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt,
+- pcb->rcv_nxt + pcb->rcv_ann_wnd - 1)){
++ pcb->rcv_nxt + pcb->rcv_wnd - 1)){
+ if (pcb->rcv_nxt == seqno) {
+ accepted_inseq = 1;
+ /* The incoming segment is the next in sequence. We check if
+@@ -1195,14 +1195,14 @@ tcp_receive(struct tcp_pcb *pcb)
+ } else {
+ pcb->ooseq = cseg;
+ }
+- }
+- tcp_seg_free(next);
+- if (cseg->next != NULL) {
+- next = cseg->next;
+- if (TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) {
+- /* We need to trim the incoming segment. */
+- cseg->len = (u16_t)(next->tcphdr->seqno - seqno);
+- pbuf_realloc(cseg->p, cseg->len);
++ tcp_seg_free(next);
++ if (cseg->next != NULL) {
++ next = cseg->next;
++ if (TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) {
++ /* We need to trim the incoming segment. */
++ cseg->len = (u16_t)(next->tcphdr->seqno - seqno);
++ pbuf_realloc(cseg->p, cseg->len);
++ }
+ }
+ }
+ break;
+@@ -1282,10 +1282,7 @@ tcp_receive(struct tcp_pcb *pcb)
+
+ }
+ } else {
+- if(!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt,
+- pcb->rcv_nxt + pcb->rcv_ann_wnd-1)){
+- tcp_ack_now(pcb);
+- }
++ tcp_ack_now(pcb);
+ }
+ } else {
+ /* Segments with length 0 is taken care of here. Segments that
+@@ -1331,7 +1328,8 @@ tcp_parseopt(struct tcp_pcb *pcb)
+ opts[c + 1] == 0x04) {
+ /* An MSS option with the right option length. */
+ mss = (opts[c + 2] << 8) | opts[c + 3];
+- pcb->mss = mss > TCP_MSS? TCP_MSS: mss;
++ /* Limit the mss to the configured TCP_MSS and prevent division by zero */
++ pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss;
+
+ /* And we are done processing options. */
+ break;
+Index: src/core/ipv4/autoip.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/ipv4/autoip.c,v
+retrieving revision 1.16
+retrieving revision 1.17
+diff -u -p -r1.16 -r1.17
+--- a/src/core/ipv4/autoip.c 26 Jan 2008 16:11:40 -0000 1.16
++++ b/src/core/ipv4/autoip.c 17 Jun 2008 20:16:23 -0000 1.17
+@@ -395,8 +395,8 @@ autoip_arp_reply(struct netif *netif, st
+ /* Copy struct ip_addr2 to aligned ip_addr, to support compilers without
+ * structure packing (not using structure copy which breaks strict-aliasing rules).
+ */
+- MEMCPY(&sipaddr, &hdr->sipaddr, sizeof(sipaddr));
+- MEMCPY(&dipaddr, &hdr->dipaddr, sizeof(dipaddr));
++ SMEMCPY(&sipaddr, &hdr->sipaddr, sizeof(sipaddr));
++ SMEMCPY(&dipaddr, &hdr->dipaddr, sizeof(dipaddr));
+
+ if ((netif->autoip->state == AUTOIP_STATE_PROBING) ||
+ ((netif->autoip->state == AUTOIP_STATE_ANNOUNCING) &&
+Index: src/core/ipv4/inet_chksum.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/ipv4/inet_chksum.c,v
+retrieving revision 1.4
+retrieving revision 1.5
+diff -u -p -r1.4 -r1.5
+--- a/src/core/ipv4/inet_chksum.c 10 Mar 2008 16:12:31 -0000 1.4
++++ b/src/core/ipv4/inet_chksum.c 17 Jun 2008 20:06:25 -0000 1.5
+@@ -41,8 +41,6 @@
+ #include "lwip/inet_chksum.h"
+ #include "lwip/inet.h"
+
+-#include <string.h>
+-
+ /* These are some reference implementations of the checksum algorithm, with the
+ * aim of being simple, correct and fully portable. Checksumming is the
+ * first thing you would want to optimize for your platform. If you create
+@@ -65,6 +63,11 @@
+ # define LWIP_CHKSUM_ALGORITHM 0
+ #endif
+
++/** Like the name says... */
++#define SWAP_BYTES_IN_WORD(w) ((w & 0xff) << 8) | ((w & 0xff00) >> 8)
++/** Split an u32_t in two u16_ts and add them up */
++#define FOLD_U32T(u) ((u >> 16) + (u & 0x0000ffffUL))
++
+ #if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */
+ /**
+ * lwip checksum
+@@ -86,8 +89,7 @@ lwip_standard_chksum(void *dataptr, u16_
+ acc = 0;
+ /* dataptr may be at odd or even addresses */
+ octetptr = (u8_t*)dataptr;
+- while (len > 1)
+- {
++ while (len > 1) {
+ /* declare first octet as most significant
+ thus assume network order, ignoring host order */
+ src = (*octetptr) << 8;
+@@ -98,8 +100,7 @@ lwip_standard_chksum(void *dataptr, u16_
+ acc += src;
+ len -= 2;
+ }
+- if (len > 0)
+- {
++ if (len > 0) {
+ /* accumulate remaining octet */
+ src = (*octetptr) << 8;
+ acc += src;
+@@ -154,19 +155,22 @@ lwip_standard_chksum(void *dataptr, int
+ }
+
+ /* Consume left-over byte, if any */
+- if (len > 0)
++ if (len > 0) {
+ ((u8_t *)&t)[0] = *(u8_t *)ps;;
++ }
+
+ /* Add end bytes */
+ sum += t;
+
+- /* Fold 32-bit sum to 16 bits */
+- while ((sum >> 16) != 0)
+- sum = (sum & 0xffff) + (sum >> 16);
++ /* Fold 32-bit sum to 16 bits
++     calling this twice is probably faster than if statements... */
++ sum = FOLD_U32T(sum);
++ sum = FOLD_U32T(sum);
+
+ /* Swap if alignment was odd */
+- if (odd)
+- sum = ((sum & 0xff) << 8) | ((sum & 0xff00) >> 8);
++ if (odd) {
++ sum = SWAP_BYTES_IN_WORD(sum);
++ }
+
+ return sum;
+ }
+@@ -211,18 +215,20 @@ lwip_standard_chksum(void *dataptr, int
+
+ while (len > 7) {
+ tmp = sum + *pl++; /* ping */
+- if (tmp < sum)
++ if (tmp < sum) {
+ tmp++; /* add back carry */
++ }
+
+ sum = tmp + *pl++; /* pong */
+- if (sum < tmp)
++ if (sum < tmp) {
+ sum++; /* add back carry */
++ }
+
+ len -= 8;
+ }
+
+ /* make room in upper bits */
+- sum = (sum >> 16) + (sum & 0xffff);
++ sum = FOLD_U32T(sum);
+
+ ps = (u16_t *)pl;
+
+@@ -233,16 +239,20 @@ lwip_standard_chksum(void *dataptr, int
+ }
+
+ /* dangling tail byte remaining? */
+- if (len > 0) /* include odd byte */
++ if (len > 0) { /* include odd byte */
+ ((u8_t *)&t)[0] = *(u8_t *)ps;
++ }
+
+ sum += t; /* add end bytes */
+
+- while ((sum >> 16) != 0) /* combine halves */
+- sum = (sum >> 16) + (sum & 0xffff);
++ /* Fold 32-bit sum to 16 bits
++     calling this twice is probably faster than if statements... */
++ sum = FOLD_U32T(sum);
++ sum = FOLD_U32T(sum);
+
+- if (odd)
+- sum = ((sum & 0xff) << 8) | ((sum & 0xff00) >> 8);
++ if (odd) {
++ sum = SWAP_BYTES_IN_WORD(sum);
++ }
+
+ return sum;
+ }
+@@ -277,18 +287,18 @@ inet_chksum_pseudo(struct pbuf *p,
+ (void *)q, (void *)q->next));
+ acc += LWIP_CHKSUM(q->payload, q->len);
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
+- while ((acc >> 16) != 0) {
+- acc = (acc & 0xffffUL) + (acc >> 16);
+- }
++  /* just executing this next line is probably faster than the if statement needed
++ to check whether we really need to execute it, and does no harm */
++ acc = FOLD_U32T(acc);
+ if (q->len % 2 != 0) {
+ swapped = 1 - swapped;
+- acc = ((acc & 0xff) << 8) | ((acc & 0xff00UL) >> 8);
++ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
+ }
+
+ if (swapped) {
+- acc = ((acc & 0xff) << 8) | ((acc & 0xff00UL) >> 8);
++ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ acc += (src->addr & 0xffffUL);
+ acc += ((src->addr >> 16) & 0xffffUL);
+@@ -297,9 +307,10 @@ inet_chksum_pseudo(struct pbuf *p,
+ acc += (u32_t)htons((u16_t)proto);
+ acc += (u32_t)htons(proto_len);
+
+- while ((acc >> 16) != 0) {
+- acc = (acc & 0xffffUL) + (acc >> 16);
+- }
++ /* Fold 32-bit sum to 16 bits
++     calling this twice is probably faster than if statements... */
++ acc = FOLD_U32T(acc);
++ acc = FOLD_U32T(acc);
+ LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
+ return (u16_t)~(acc & 0xffffUL);
+ }
+@@ -340,18 +351,17 @@ inet_chksum_pseudo_partial(struct pbuf *
+ chksum_len -= chklen;
+ LWIP_ASSERT("delete me", chksum_len < 0x7fff);
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
+- while ((acc >> 16) != 0) {
+- acc = (acc & 0xffffUL) + (acc >> 16);
+- }
++ /* fold the upper bit down */
++ acc = FOLD_U32T(acc);
+ if (q->len % 2 != 0) {
+ swapped = 1 - swapped;
+- acc = ((acc & 0xff) << 8) | ((acc & 0xff00UL) >> 8);
++ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
+ }
+
+ if (swapped) {
+- acc = ((acc & 0xff) << 8) | ((acc & 0xff00UL) >> 8);
++ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ acc += (src->addr & 0xffffUL);
+ acc += ((src->addr >> 16) & 0xffffUL);
+@@ -360,9 +370,10 @@ inet_chksum_pseudo_partial(struct pbuf *
+ acc += (u32_t)htons((u16_t)proto);
+ acc += (u32_t)htons(proto_len);
+
+- while ((acc >> 16) != 0) {
+- acc = (acc & 0xffffUL) + (acc >> 16);
+- }
++ /* Fold 32-bit sum to 16 bits
++     calling this twice is probably faster than if statements... */
++ acc = FOLD_U32T(acc);
++ acc = FOLD_U32T(acc);
+ LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
+ return (u16_t)~(acc & 0xffffUL);
+ }
+@@ -380,13 +391,7 @@ inet_chksum_pseudo_partial(struct pbuf *
+ u16_t
+ inet_chksum(void *dataptr, u16_t len)
+ {
+- u32_t acc;
+-
+- acc = LWIP_CHKSUM(dataptr, len);
+- while ((acc >> 16) != 0) {
+- acc = (acc & 0xffff) + (acc >> 16);
+- }
+- return (u16_t)~(acc & 0xffff);
++ return ~LWIP_CHKSUM(dataptr, len);
+ }
+
+ /**
+@@ -407,17 +412,15 @@ inet_chksum_pbuf(struct pbuf *p)
+ swapped = 0;
+ for(q = p; q != NULL; q = q->next) {
+ acc += LWIP_CHKSUM(q->payload, q->len);
+- while ((acc >> 16) != 0) {
+- acc = (acc & 0xffffUL) + (acc >> 16);
+- }
++ acc = FOLD_U32T(acc);
+ if (q->len % 2 != 0) {
+ swapped = 1 - swapped;
+- acc = (acc & 0x00ffUL << 8) | (acc & 0xff00UL >> 8);
++ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ }
+
+ if (swapped) {
+- acc = ((acc & 0x00ffUL) << 8) | ((acc & 0xff00UL) >> 8);
++ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ return (u16_t)~(acc & 0xffffUL);
+ }
+Index: src/core/ipv4/ip.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/core/ipv4/ip.c,v
+retrieving revision 1.66
+retrieving revision 1.68
+diff -u -p -r1.66 -r1.68
+--- a/src/core/ipv4/ip.c 14 Jan 2008 20:53:23 -0000 1.66
++++ b/src/core/ipv4/ip.c 17 Jun 2008 19:39:22 -0000 1.68
+@@ -531,9 +531,19 @@ ip_output_if(struct pbuf *p, struct ip_a
+ LWIP_DEBUGF(IP_DEBUG, ("ip_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], netif->num));
+ ip_debug_print(p);
+
+- LWIP_DEBUGF(IP_DEBUG, ("netif->output()"));
++#if (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF)
++ if (ip_addr_cmp(dest, &netif->ip_addr)) {
++ /* Packet to self, enqueue it for loopback */
++ LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()"));
++
++ return netif_loop_output(netif, p, dest);
++ } else
++#endif /* (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF) */
++ {
++ LWIP_DEBUGF(IP_DEBUG, ("netif->output()"));
+
+- return netif->output(netif, p, dest);
++ return netif->output(netif, p, dest);
++ }
+ }
+
+ /**
+Index: src/include/lwip/debug.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/debug.h,v
+retrieving revision 1.37
+retrieving revision 1.39
+diff -u -p -r1.37 -r1.39
+--- a/src/include/lwip/debug.h 22 Sep 2007 11:16:07 -0000 1.37
++++ b/src/include/lwip/debug.h 16 Jul 2008 20:36:22 -0000 1.39
+@@ -61,26 +61,28 @@
+ #define LWIP_DBG_HALT 0x08U
+
+ #ifndef LWIP_NOASSERT
+-#define LWIP_ASSERT(x,y) do { if(!(y)) LWIP_PLATFORM_ASSERT(x); } while(0)
++#define LWIP_ASSERT(message, assertion) do { if(!(assertion)) \
++ LWIP_PLATFORM_ASSERT(message); } while(0)
+ #else /* LWIP_NOASSERT */
+-#define LWIP_ASSERT(x,y)
++#define LWIP_ASSERT(message, assertion)
+ #endif /* LWIP_NOASSERT */
+
+-/** print "m" message only if "e" is true, and execute "h" expression */
++/** if "expression" isn't true, then print "message" and execute "handler" expression */
+ #ifndef LWIP_ERROR
+-#define LWIP_ERROR(m,e,h) do { if (!(e)) { LWIP_PLATFORM_ASSERT(m); h;}} while(0)
++#define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \
++ LWIP_PLATFORM_ASSERT(message); handler;}} while(0)
+ #endif /* LWIP_ERROR */
+
+ #ifdef LWIP_DEBUG
+ /** print debug message only if debug message type is enabled...
+ * AND is of correct type AND is at least LWIP_DBG_LEVEL
+ */
+-#define LWIP_DEBUGF(debug,x) do { \
++#define LWIP_DEBUGF(debug, message) do { \
+ if ( \
+ ((debug) & LWIP_DBG_ON) && \
+ ((debug) & LWIP_DBG_TYPES_ON) && \
+ ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \
+- LWIP_PLATFORM_DIAG(x); \
++ LWIP_PLATFORM_DIAG(message); \
+ if ((debug) & LWIP_DBG_HALT) { \
+ while(1); \
+ } \
+@@ -88,7 +90,7 @@
+ } while(0)
+
+ #else /* LWIP_DEBUG */
+-#define LWIP_DEBUGF(debug,x)
++#define LWIP_DEBUGF(debug, message)
+ #endif /* LWIP_DEBUG */
+
+ #endif /* __LWIP_DEBUG_H__ */
+Index: src/include/lwip/err.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/err.h,v
+retrieving revision 1.13
+retrieving revision 1.15
+diff -u -p -r1.13 -r1.15
+--- a/src/include/lwip/err.h 13 Dec 2007 23:06:50 -0000 1.13
++++ b/src/include/lwip/err.h 17 Jun 2008 20:27:32 -0000 1.15
+@@ -33,37 +33,43 @@
+ #define __LWIP_ERR_H__
+
+ #include "lwip/opt.h"
++#include "lwip/arch.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+-typedef s8_t err_t;
++/** Define LWIP_ERR_T in cc.h if you want to use
++ * a different type for your platform (must be signed). */
++#ifdef LWIP_ERR_T
++typedef LWIP_ERR_T err_t;
++#else /* LWIP_ERR_T */
++ typedef s8_t err_t;
++#endif /* LWIP_ERR_T*/
+
+ /* Definitions for error constants. */
+
+ #define ERR_OK 0 /* No error, everything OK. */
+ #define ERR_MEM -1 /* Out of memory error. */
+ #define ERR_BUF -2 /* Buffer error. */
+-#define ERR_RTE -3 /* Routing problem. */
++#define ERR_TIMEOUT -3 /* Timeout. */
++#define ERR_RTE -4 /* Routing problem. */
+
+ #define ERR_IS_FATAL(e) ((e) < ERR_RTE)
+
+-#define ERR_ABRT -4 /* Connection aborted. */
+-#define ERR_RST -5 /* Connection reset. */
+-#define ERR_CLSD -6 /* Connection closed. */
+-#define ERR_CONN -7 /* Not connected. */
++#define ERR_ABRT -5 /* Connection aborted. */
++#define ERR_RST -6 /* Connection reset. */
++#define ERR_CLSD -7 /* Connection closed. */
++#define ERR_CONN -8 /* Not connected. */
+
+-#define ERR_VAL -8 /* Illegal value. */
++#define ERR_VAL -9 /* Illegal value. */
+
+-#define ERR_ARG -9 /* Illegal argument. */
++#define ERR_ARG -10 /* Illegal argument. */
+
+-#define ERR_USE -10 /* Address in use. */
++#define ERR_USE -11 /* Address in use. */
+
+-#define ERR_IF -11 /* Low-level netif error */
+-#define ERR_ISCONN -12 /* Already connected. */
+-
+-#define ERR_TIMEOUT -13 /* Timeout. */
++#define ERR_IF -12 /* Low-level netif error */
++#define ERR_ISCONN -13 /* Already connected. */
+
+ #define ERR_INPROGRESS -14 /* Operation in progress */
+
+Index: src/include/lwip/mem.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/mem.h,v
+retrieving revision 1.21
+retrieving revision 1.22
+diff -u -p -r1.21 -r1.22
+--- a/src/include/lwip/mem.h 4 Mar 2008 16:31:32 -0000 1.21
++++ b/src/include/lwip/mem.h 30 May 2008 11:37:15 -0000 1.22
+@@ -50,16 +50,16 @@ typedef size_t mem_size_t;
+ * allow these defines to be overridden.
+ */
+ #ifndef mem_free
+-#define mem_free(x) free(x)
++#define mem_free free
+ #endif
+ #ifndef mem_malloc
+-#define mem_malloc(x) malloc(x)
++#define mem_malloc malloc
+ #endif
+ #ifndef mem_calloc
+-#define mem_calloc(x, y) calloc(x, y)
++#define mem_calloc calloc
+ #endif
+ #ifndef mem_realloc
+-#define mem_realloc(x, size) (x)
++#define mem_realloc realloc
+ #endif
+ #else /* MEM_LIBC_MALLOC */
+
+Index: src/include/lwip/netif.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/netif.h,v
+retrieving revision 1.43
+retrieving revision 1.46
+diff -u -p -r1.43 -r1.46
+--- a/src/include/lwip/netif.h 9 Oct 2007 19:59:59 -0000 1.43
++++ b/src/include/lwip/netif.h 19 Jun 2008 16:27:23 -0000 1.46
+@@ -34,6 +34,8 @@
+
+ #include "lwip/opt.h"
+
++#define ENABLE_LOOPBACK (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF)
++
+ #include "lwip/err.h"
+
+ #include "lwip/ip_addr.h"
+@@ -165,6 +167,14 @@ struct netif {
+ #if LWIP_NETIF_HWADDRHINT
+ u8_t *addr_hint;
+ #endif /* LWIP_NETIF_HWADDRHINT */
++#if ENABLE_LOOPBACK
++ /* List of packets to be queued for ourselves. */
++ struct pbuf *loop_first;
++ struct pbuf *loop_last;
++#if LWIP_LOOPBACK_MAX_PBUFS
++ u16_t loop_cnt_current;
++#endif /* LWIP_LOOPBACK_MAX_PBUFS */
++#endif /* ENABLE_LOOPBACK */
+ };
+
+ #if LWIP_SNMP
+@@ -242,4 +252,12 @@ void netif_set_link_callback(struct neti
+ }
+ #endif
+
++#if ENABLE_LOOPBACK
++err_t netif_loop_output(struct netif *netif, struct pbuf *p, struct ip_addr *dest_ip);
++void netif_poll(struct netif *netif);
++#if !LWIP_NETIF_LOOPBACK_MULTITHREADING
++void netif_poll_all(void);
++#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */
++#endif /* ENABLE_LOOPBACK */
++
+ #endif /* __LWIP_NETIF_H__ */
+Index: src/include/lwip/opt.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/opt.h,v
+retrieving revision 1.116
+retrieving revision 1.122
+diff -u -p -r1.116 -r1.122
+--- a/src/include/lwip/opt.h 31 Jan 2008 18:19:29 -0000 1.116
++++ b/src/include/lwip/opt.h 30 Jun 2008 18:16:52 -0000 1.122
+@@ -155,6 +155,27 @@
+ #define MEMP_USE_CUSTOM_POOLS 0
+ #endif
+
++/**
++ * Set this to 1 if you want to free PBUF_RAM pbufs (or call mem_free()) from
++ * interrupt context (or another context that doesn't allow waiting for a
++ * semaphore).
++ * If set to 1, mem_malloc will be protected by a semaphore and SYS_ARCH_PROTECT,
++ * while mem_free will only use SYS_ARCH_PROTECT. mem_malloc SYS_ARCH_UNPROTECTs
++ * with each loop so that mem_free can run.
++ *
++ * ATTENTION: As you can see from the above description, this leads to dis-/
++ * enabling interrupts often, which can be slow! Also, on low memory, mem_malloc
++ * can need longer.
++ *
++ * If you don't want that, at least for NO_SYS=0, you can still use the following
++ * functions to enqueue a deallocation call which then runs in the tcpip_thread
++ * context:
++ * - pbuf_free_callback(p);
++ * - mem_free_callback(m);
++ */
++#ifndef LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
++#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 0
++#endif
+
+ /*
+ ------------------------------------------------
+@@ -815,6 +836,39 @@
+ #define LWIP_NETIF_HWADDRHINT 0
+ #endif
+
++/**
++ * LWIP_NETIF_LOOPBACK==1: Support sending packets with a destination IP
++ * address equal to the netif IP address, looping them back up the stack.
++ */
++#ifndef LWIP_NETIF_LOOPBACK
++#define LWIP_NETIF_LOOPBACK 0
++#endif
++
++/**
++ * LWIP_LOOPBACK_MAX_PBUFS: Maximum number of pbufs on queue for loopback
++ * sending for each netif (0 = disabled)
++ */
++#ifndef LWIP_LOOPBACK_MAX_PBUFS
++#define LWIP_LOOPBACK_MAX_PBUFS 0
++#endif
++
++/**
++ * LWIP_NETIF_LOOPBACK_MULTITHREADING: Indicates whether threading is enabled in
++ * the system, as netifs must change how they behave depending on this setting
++ * for the LWIP_NETIF_LOOPBACK option to work.
++ * Setting this is needed to avoid reentering non-reentrant functions like
++ * tcp_input().
++ * LWIP_NETIF_LOOPBACK_MULTITHREADING==1: Indicates that the user is using a
++ * multithreaded environment like tcpip.c. In this case, netif->input()
++ * is called directly.
++ * LWIP_NETIF_LOOPBACK_MULTITHREADING==0: Indicates a polling (or NO_SYS) setup.
++ * The packets are put on a list and netif_poll() must be called in
++ * the main application loop.
++ */
++#ifndef LWIP_NETIF_LOOPBACK_MULTITHREADING
++#define LWIP_NETIF_LOOPBACK_MULTITHREADING (!NO_SYS)
++#endif
++
+ /*
+ ------------------------------------
+ ---------- LOOPIF options ----------
+@@ -827,20 +881,16 @@
+ #define LWIP_HAVE_LOOPIF 0
+ #endif
+
++/*
++ ------------------------------------
++ ---------- SLIPIF options ----------
++ ------------------------------------
++*/
+ /**
+- * LWIP_LOOPIF_MULTITHREADING: Indicates whether threading is enabled in
+- * the system, as LOOPIF must change how it behaves depending on this setting.
+- * Setting this is needed to avoid reentering non-reentrant functions like
+- * tcp_input().
+- * LWIP_LOOPIF_MULTITHREADING==1: Indicates that the user is using a
+- * multithreaded environment like tcpip.c. In this case, netif->input()
+- * is called directly.
+- * LWIP_LOOPIF_MULTITHREADING==0: Indicates a polling (or NO_SYS) setup.
+- * The packets are put on a list and loopif_poll() must be called in
+- * the main application loop.
++ * LWIP_HAVE_SLIPIF==1: Support slip interface and slipif.c
+ */
+-#ifndef LWIP_LOOPIF_MULTITHREADING
+-#define LWIP_LOOPIF_MULTITHREADING 1
++#ifndef LWIP_HAVE_SLIPIF
++#define LWIP_HAVE_SLIPIF 0
+ #endif
+
+ /*
+Index: src/include/lwip/sio.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/sio.h,v
+retrieving revision 1.7
+retrieving revision 1.8
+diff -u -p -r1.7 -r1.8
+--- a/src/include/lwip/sio.h 6 Sep 2007 16:43:44 -0000 1.7
++++ b/src/include/lwip/sio.h 27 Mar 2008 18:06:02 -0000 1.8
+@@ -32,16 +32,24 @@
+ * It needs to be implemented by those platforms which need SLIP or PPP
+ */
+
++#ifndef __SIO_H__
++#define __SIO_H__
++
+ #include "lwip/arch.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
++/* If you want to define sio_fd_t elsewhere or differently,
++ define this in your cc.h file. */
+ #ifndef __sio_fd_t_defined
+ typedef void * sio_fd_t;
+ #endif
+
++/* The following functions can be defined to something else in your cc.h file
++ or be implemented in your custom sio.c file. */
++
+ #ifndef sio_open
+ sio_fd_t sio_open(u8_t);
+ #endif
+@@ -69,3 +77,5 @@ void sio_read_abort(sio_fd_t);
+ #ifdef __cplusplus
+ }
+ #endif
++
++#endif /* __SIO_H__ */
+Index: src/include/lwip/sockets.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/sockets.h,v
+retrieving revision 1.38
+retrieving revision 1.39
+diff -u -p -r1.38 -r1.39
+--- a/src/include/lwip/sockets.h 2 Dec 2007 15:24:02 -0000 1.38
++++ b/src/include/lwip/sockets.h 26 Apr 2008 10:46:23 -0000 1.39
+@@ -177,7 +177,22 @@ typedef struct ip_mreq {
+ } ip_mreq;
+ #endif /* LWIP_IGMP */
+
+-/* Unimplemented for now... */
++/*
++ * The Type of Service provides an indication of the abstract
++ * parameters of the quality of service desired. These parameters are
++ * to be used to guide the selection of the actual service parameters
++ * when transmitting a datagram through a particular network. Several
++ * networks offer service precedence, which somehow treats high
++ * precedence traffic as more important than other traffic (generally
++ * by accepting only traffic above a certain precedence at time of high
++ * load). The major choice is a three way tradeoff between low-delay,
++ * high-reliability, and high-throughput.
++ * The use of the Delay, Throughput, and Reliability indications may
++ * increase the cost (in some sense) of the service. In many networks
++ * better performance for one of these parameters is coupled with worse
++ * performance on another. Except for very unusual cases at most two
++ * of these three indications should be set.
++ */
+ #define IPTOS_TOS_MASK 0x1E
+ #define IPTOS_TOS(tos) ((tos) & IPTOS_TOS_MASK)
+ #define IPTOS_LOWDELAY 0x10
+@@ -187,7 +202,13 @@ typedef struct ip_mreq {
+ #define IPTOS_MINCOST IPTOS_LOWCOST
+
+ /*
+- * Definitions for IP precedence (also in ip_tos) (Unimplemented)
++ * The Network Control precedence designation is intended to be used
++ * within a network only. The actual use and control of that
++ * designation is up to each network. The Internetwork Control
++ * designation is intended for use by gateway control originators only.
++ * If the actual use of these precedence designations is of concern to
++ * a particular network, it is the responsibility of that network to
++ * control the access to, and use of, those precedence designations.
+ */
+ #define IPTOS_PREC_MASK 0xe0
+ #define IPTOS_PREC(tos) ((tos) & IPTOS_PREC_MASK)
+Index: src/include/lwip/stats.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/stats.h,v
+retrieving revision 1.19
+retrieving revision 1.23
+diff -u -p -r1.19 -r1.23
+--- a/src/include/lwip/stats.h 28 Nov 2007 21:25:07 -0000 1.19
++++ b/src/include/lwip/stats.h 8 Jul 2008 09:15:57 -0000 1.23
+@@ -57,7 +57,6 @@ extern "C" {
+
+ struct stats_proto {
+ STAT_COUNTER xmit; /* Transmitted packets. */
+- STAT_COUNTER rexmit; /* Retransmitted packets. */
+ STAT_COUNTER recv; /* Received packets. */
+ STAT_COUNTER fw; /* Forwarded packets. */
+ STAT_COUNTER drop; /* Dropped packets. */
+@@ -87,7 +86,8 @@ struct stats_mem {
+ mem_size_t avail;
+ mem_size_t used;
+ mem_size_t max;
+- mem_size_t err;
++ STAT_COUNTER err;
++ STAT_COUNTER illegal;
+ };
+
+ struct stats_syselem {
+@@ -142,64 +142,138 @@ extern struct stats_ lwip_stats;
+ #define stats_init() /* Compatibility define, no init needed. */
+
+ #define STATS_INC(x) ++lwip_stats.x
++#define STATS_DEC(x) --lwip_stats.x
+ #else
+ #define stats_init()
+ #define STATS_INC(x)
++#define STATS_DEC(x)
+ #endif /* LWIP_STATS */
+
+ #if TCP_STATS
+ #define TCP_STATS_INC(x) STATS_INC(x)
++#define TCP_STATS_DISPLAY() stats_display_proto(&lwip_stats.tcp, "TCP")
+ #else
+ #define TCP_STATS_INC(x)
++#define TCP_STATS_DISPLAY()
+ #endif
+
+ #if UDP_STATS
+ #define UDP_STATS_INC(x) STATS_INC(x)
++#define UDP_STATS_DISPLAY() stats_display_proto(&lwip_stats.udp, "UDP")
+ #else
+ #define UDP_STATS_INC(x)
++#define UDP_STATS_DISPLAY()
+ #endif
+
+ #if ICMP_STATS
+ #define ICMP_STATS_INC(x) STATS_INC(x)
++#define ICMP_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp, "ICMP")
+ #else
+ #define ICMP_STATS_INC(x)
++#define ICMP_STATS_DISPLAY()
+ #endif
+
+ #if IGMP_STATS
+ #define IGMP_STATS_INC(x) STATS_INC(x)
++#define IGMP_STATS_DISPLAY() stats_display_igmp(&lwip_stats.igmp)
+ #else
+ #define IGMP_STATS_INC(x)
++#define IGMP_STATS_DISPLAY()
+ #endif
+
+ #if IP_STATS
+ #define IP_STATS_INC(x) STATS_INC(x)
++#define IP_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip, "IP")
+ #else
+ #define IP_STATS_INC(x)
++#define IP_STATS_DISPLAY()
+ #endif
+
+ #if IPFRAG_STATS
+ #define IPFRAG_STATS_INC(x) STATS_INC(x)
++#define IPFRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip_frag, "IP_FRAG")
+ #else
+ #define IPFRAG_STATS_INC(x)
++#define IPFRAG_STATS_DISPLAY()
+ #endif
+
+ #if ETHARP_STATS
+ #define ETHARP_STATS_INC(x) STATS_INC(x)
++#define ETHARP_STATS_DISPLAY() stats_display_proto(&lwip_stats.etharp, "ETHARP")
+ #else
+ #define ETHARP_STATS_INC(x)
++#define ETHARP_STATS_DISPLAY()
+ #endif
+
+ #if LINK_STATS
+ #define LINK_STATS_INC(x) STATS_INC(x)
++#define LINK_STATS_DISPLAY() stats_display_proto(&lwip_stats.link, "LINK")
+ #else
+ #define LINK_STATS_INC(x)
++#define LINK_STATS_DISPLAY()
++#endif
++
++#if MEM_STATS
++#define MEM_STATS_AVAIL(x, y) lwip_stats.mem.x = y
++#define MEM_STATS_INC(x) STATS_INC(mem.x)
++#define MEM_STATS_INC_USED(x, y) do { lwip_stats.mem.used += y; \
++ if (lwip_stats.mem.max < lwip_stats.mem.used) { \
++ lwip_stats.mem.max = lwip_stats.mem.used; \
++ } \
++ } while(0)
++#define MEM_STATS_DEC_USED(x, y) lwip_stats.mem.x -= y
++#define MEM_STATS_DISPLAY() stats_display_mem(&lwip_stats.mem, "HEAP")
++#else
++#define MEM_STATS_AVAIL(x, y)
++#define MEM_STATS_INC(x)
++#define MEM_STATS_INC_USED(x, y)
++#define MEM_STATS_DEC_USED(x, y)
++#define MEM_STATS_DISPLAY()
++#endif
++
++#if MEMP_STATS
++#define MEMP_STATS_AVAIL(x, i, y) lwip_stats.memp[i].x = y
++#define MEMP_STATS_INC(x, i) STATS_INC(memp[i].x)
++#define MEMP_STATS_DEC(x, i) STATS_DEC(memp[i].x)
++#define MEMP_STATS_INC_USED(x, i) do { ++lwip_stats.memp[i].used; \
++ if (lwip_stats.memp[i].max < lwip_stats.memp[i].used) { \
++ lwip_stats.memp[i].max = lwip_stats.memp[i].used; \
++ } \
++ } while(0)
++#define MEMP_STATS_DISPLAY(i) stats_display_memp(&lwip_stats.memp[i], i)
++#else
++#define MEMP_STATS_AVAIL(x, i, y)
++#define MEMP_STATS_INC(x, i)
++#define MEMP_STATS_DEC(x, i)
++#define MEMP_STATS_INC_USED(x, i)
++#define MEMP_STATS_DISPLAY(i)
++#endif
++
++#if SYS_STATS
++#define SYS_STATS_INC(x) STATS_INC(sys.x)
++#define SYS_STATS_DEC(x) STATS_DEC(sys.x)
++#define SYS_STATS_DISPLAY() stats_display_sys(&lwip_stats.sys)
++#else
++#define SYS_STATS_INC(x)
++#define SYS_STATS_DEC(x)
++#define SYS_STATS_DISPLAY()
+ #endif
+
+ /* Display of statistics */
+ #if LWIP_STATS_DISPLAY
+ void stats_display(void);
++void stats_display_proto(struct stats_proto *proto, char *name);
++void stats_display_igmp(struct stats_igmp *igmp);
++void stats_display_mem(struct stats_mem *mem, char *name);
++void stats_display_memp(struct stats_mem *mem, int index);
++void stats_display_sys(struct stats_sys *sys);
+ #else
+ #define stats_display()
++#define stats_display_proto(proto, name)
++#define stats_display_igmp(igmp)
++#define stats_display_mem(mem, name)
++#define stats_display_memp(mem, index)
++#define stats_display_sys(sys)
+ #endif /* LWIP_STATS_DISPLAY */
+
+ #ifdef __cplusplus
+Index: src/include/lwip/tcpip.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/lwip/tcpip.h,v
+retrieving revision 1.24
+retrieving revision 1.27
+diff -u -p -r1.24 -r1.27
+--- a/src/include/lwip/tcpip.h 12 Jan 2008 11:52:22 -0000 1.24
++++ b/src/include/lwip/tcpip.h 27 Jun 2008 20:34:55 -0000 1.27
+@@ -83,7 +83,11 @@ err_t tcpip_netifapi_lock(struct netifap
+ #endif /* LWIP_NETIF_API */
+
+ err_t tcpip_callback_with_block(void (*f)(void *ctx), void *ctx, u8_t block);
+-#define tcpip_callback(f,ctx) tcpip_callback_with_block(f,ctx,1)
++#define tcpip_callback(f, ctx) tcpip_callback_with_block(f, ctx, 1)
++
++/* free pbufs or heap memory from another context without blocking */
++err_t pbuf_free_callback(struct pbuf *p);
++err_t mem_free_callback(void *m);
+
+ err_t tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg);
+ #define tcpip_untimeout(h, arg) tcpip_timeout(0xffffffff, h, arg)
+Index: src/include/netif/loopif.h
+===================================================================
+RCS file: /sources/lwip/lwip/src/include/netif/loopif.h,v
+retrieving revision 1.7
+retrieving revision 1.9
+diff -u -p -r1.7 -r1.9
+--- a/src/include/netif/loopif.h 10 May 2007 10:59:20 -0000 1.7
++++ b/src/include/netif/loopif.h 17 Jun 2008 20:12:22 -0000 1.9
+@@ -32,6 +32,7 @@
+ #ifndef __NETIF_LOOPIF_H__
+ #define __NETIF_LOOPIF_H__
+
++#include "lwip/opt.h"
+ #include "lwip/netif.h"
+ #include "lwip/err.h"
+
+@@ -39,9 +40,9 @@
+ extern "C" {
+ #endif
+
+-#if !LWIP_LOOPIF_MULTITHREADING
+-void loopif_poll(struct netif *netif);
+-#endif
++#if !LWIP_NETIF_LOOPBACK_MULTITHREADING
++#define loopif_poll netif_poll
++#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */
+
+ err_t loopif_init(struct netif *netif);
+
+Index: src/netif/etharp.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/netif/etharp.c,v
+retrieving revision 1.145
+retrieving revision 1.148
+diff -u -p -r1.145 -r1.148
+--- a/src/netif/etharp.c 4 Mar 2008 13:41:24 -0000 1.145
++++ b/src/netif/etharp.c 19 Jun 2008 16:40:59 -0000 1.148
+@@ -353,7 +353,7 @@ find_entry(struct ip_addr *ipaddr, u8_t
+ * 1) empty entry
+ * 2) oldest stable entry
+ * 3) oldest pending entry without queued packets
+- * 4) oldest pending entry without queued packets
++ * 4) oldest pending entry with queued packets
+ *
+ * { ETHARP_TRY_HARD is set at this point }
+ */
+@@ -1130,7 +1130,14 @@ ethernet_input(struct pbuf *p, struct ne
+
+ /* points to packet payload, which starts with an Ethernet header */
+ ethhdr = p->payload;
+-
++ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE,
++ ("ethernet_input: dest:%02x:%02x:%02x:%02x:%02x:%02x, src:%02x:%02x:%02x:%02x:%02x:%02x, type:%2hx\n",
++ (unsigned)ethhdr->dest.addr[0], (unsigned)ethhdr->dest.addr[1], (unsigned)ethhdr->dest.addr[2],
++ (unsigned)ethhdr->dest.addr[3], (unsigned)ethhdr->dest.addr[4], (unsigned)ethhdr->dest.addr[5],
++ (unsigned)ethhdr->src.addr[0], (unsigned)ethhdr->src.addr[1], (unsigned)ethhdr->src.addr[2],
++ (unsigned)ethhdr->src.addr[3], (unsigned)ethhdr->src.addr[4], (unsigned)ethhdr->src.addr[5],
++ (unsigned)htons(ethhdr->type)));
++
+ switch (htons(ethhdr->type)) {
+ /* IP packet? */
+ case ETHTYPE_IP:
+@@ -1165,6 +1172,8 @@ ethernet_input(struct pbuf *p, struct ne
+ #endif /* PPPOE_SUPPORT */
+
+ default:
++ ETHARP_STATS_INC(etharp.proterr);
++ ETHARP_STATS_INC(etharp.drop);
+ pbuf_free(p);
+ p = NULL;
+ break;
+Index: src/netif/loopif.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/netif/loopif.c,v
+retrieving revision 1.26
+retrieving revision 1.27
+diff -u -p -r1.26 -r1.27
+--- a/src/netif/loopif.c 31 Aug 2007 10:14:09 -0000 1.26
++++ b/src/netif/loopif.c 12 Jun 2008 20:10:10 -0000 1.27
+@@ -40,149 +40,8 @@
+ #if LWIP_HAVE_LOOPIF
+
+ #include "netif/loopif.h"
+-#include "lwip/pbuf.h"
+ #include "lwip/snmp.h"
+
+-#include <string.h>
+-
+-#if !LWIP_LOOPIF_MULTITHREADING
+-
+-#include "lwip/sys.h"
+-#include "lwip/mem.h"
+-
+-/* helper struct for the linked list of pbufs */
+-struct loopif_private {
+- struct pbuf *first;
+- struct pbuf *last;
+-};
+-
+-/**
+- * Call loopif_poll() in the main loop of your application. This is to prevent
+- * reentering non-reentrant functions like tcp_input(). Packets passed to
+- * loopif_output() are put on a list that is passed to netif->input() by
+- * loopif_poll().
+- *
+- * @param netif the lwip network interface structure for this loopif
+- */
+-void
+-loopif_poll(struct netif *netif)
+-{
+- SYS_ARCH_DECL_PROTECT(lev);
+- struct pbuf *in, *in_end;
+- struct loopif_private *priv = (struct loopif_private*)netif->state;
+-
+- LWIP_ERROR("priv != NULL", (priv != NULL), return;);
+-
+- do {
+- /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */
+- SYS_ARCH_PROTECT(lev);
+- in = priv->first;
+- if(in) {
+- in_end = in;
+- while(in_end->len != in_end->tot_len) {
+- LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL);
+- in_end = in_end->next;
+- }
+- /* 'in_end' now points to the last pbuf from 'in' */
+- if(in_end == priv->last) {
+- /* this was the last pbuf in the list */
+- priv->first = priv->last = NULL;
+- } else {
+- /* pop the pbuf off the list */
+- priv->first = in_end->next;
+- LWIP_ASSERT("should not be null since first != last!", priv->first != NULL);
+- }
+- }
+- SYS_ARCH_UNPROTECT(lev);
+-
+- if(in != NULL) {
+- if(in_end->next != NULL) {
+- /* De-queue the pbuf from its successors on the 'priv' list. */
+- in_end->next = NULL;
+- }
+- if(netif->input(in, netif) != ERR_OK) {
+- pbuf_free(in);
+- }
+- /* Don't reference the packet any more! */
+- in = NULL;
+- in_end = NULL;
+- }
+- /* go on while there is a packet on the list */
+- } while(priv->first != NULL);
+-}
+-#endif /* LWIP_LOOPIF_MULTITHREADING */
+-
+-/**
+- * Send an IP packet over the loopback interface.
+- * The pbuf is simply copied and handed back to netif->input.
+- * In multithreaded mode, this is done directly since netif->input must put
+- * the packet on a queue.
+- * In callback mode, the packet is put on an internal queue and is fed to
+- * netif->input by loopif_poll().
+- *
+- * @param netif the lwip network interface structure for this loopif
+- * @param p the (IP) packet to 'send'
+- * @param ipaddr the ip address to send the packet to (not used for loopif)
+- * @return ERR_OK if the packet has been sent
+- * ERR_MEM if the pbuf used to copy the packet couldn't be allocated
+- */
+-static err_t
+-loopif_output(struct netif *netif, struct pbuf *p,
+- struct ip_addr *ipaddr)
+-{
+-#if !LWIP_LOOPIF_MULTITHREADING
+- SYS_ARCH_DECL_PROTECT(lev);
+- struct loopif_private *priv;
+- struct pbuf *last;
+-#endif /* LWIP_LOOPIF_MULTITHREADING */
+- struct pbuf *r;
+- err_t err;
+-
+- LWIP_UNUSED_ARG(ipaddr);
+-
+- /* Allocate a new pbuf */
+- r = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
+- if (r == NULL) {
+- return ERR_MEM;
+- }
+-
+- /* Copy the whole pbuf queue p into the single pbuf r */
+- if ((err = pbuf_copy(r, p)) != ERR_OK) {
+- pbuf_free(r);
+- r = NULL;
+- return err;
+- }
+-
+-#if LWIP_LOOPIF_MULTITHREADING
+- /* Multithreading environment, netif->input() is supposed to put the packet
+- into a mailbox, so we can safely call it here without risking to re-enter
+- functions that are not reentrant (TCP!!!) */
+- if(netif->input(r, netif) != ERR_OK) {
+- pbuf_free(r);
+- r = NULL;
+- }
+-#else /* LWIP_LOOPIF_MULTITHREADING */
+- /* Raw API without threads: put the packet on a linked list which gets emptied
+- through calling loopif_poll(). */
+- priv = (struct loopif_private*)netif->state;
+-
+- /* let last point to the last pbuf in chain r */
+- for (last = r; last->next != NULL; last = last->next);
+- SYS_ARCH_PROTECT(lev);
+- if(priv->first != NULL) {
+- LWIP_ASSERT("if first != NULL, last must also be != NULL", priv->last != NULL);
+- priv->last->next = r;
+- priv->last = last;
+- } else {
+- priv->first = r;
+- priv->last = last;
+- }
+- SYS_ARCH_UNPROTECT(lev);
+-#endif /* LWIP_LOOPIF_MULTITHREADING */
+-
+- return ERR_OK;
+-}
+-
+ /**
+ * Initialize a lwip network interface structure for a loopback interface
+ *
+@@ -193,16 +52,6 @@ loopif_output(struct netif *netif, struc
+ err_t
+ loopif_init(struct netif *netif)
+ {
+-#if !LWIP_LOOPIF_MULTITHREADING
+- struct loopif_private *priv;
+-
+- priv = (struct loopif_private*)mem_malloc(sizeof(struct loopif_private));
+- if(priv == NULL)
+- return ERR_MEM;
+- priv->first = priv->last = NULL;
+- netif->state = priv;
+-#endif /* LWIP_LOOPIF_MULTITHREADING */
+-
+ /* initialize the snmp variables and counters inside the struct netif
+ * ifSpeed: no assumption can be made!
+ */
+@@ -210,7 +59,7 @@ loopif_init(struct netif *netif)
+
+ netif->name[0] = 'l';
+ netif->name[1] = 'o';
+- netif->output = loopif_output;
++ netif->output = netif_loop_output;
+ return ERR_OK;
+ }
+
+Index: src/netif/slipif.c
+===================================================================
+RCS file: /sources/lwip/lwip/src/netif/slipif.c,v
+retrieving revision 1.29
+retrieving revision 1.30
+diff -u -p -r1.29 -r1.30
+--- a/src/netif/slipif.c 30 Nov 2007 17:22:21 -0000 1.29
++++ b/src/netif/slipif.c 17 Jun 2008 20:14:05 -0000 1.30
+@@ -44,6 +44,9 @@
+
+ #include "netif/slipif.h"
+ #include "lwip/opt.h"
++
++#if LWIP_HAVE_SLIPIF
++
+ #include "lwip/def.h"
+ #include "lwip/pbuf.h"
+ #include "lwip/sys.h"
+@@ -273,3 +276,4 @@ slipif_init(struct netif *netif)
+ sys_thread_new(SLIPIF_THREAD_NAME, slipif_loop, netif, SLIPIF_THREAD_STACKSIZE, SLIPIF_THREAD_PRIO);
+ return ERR_OK;
+ }
++#endif /* LWIP_HAVE_SLIPIF */
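
The inet_chksum.c changes above replace the carry-folding while-loops with two
back-to-back applications of the new FOLD_U32T macro, plus SWAP_BYTES_IN_WORD
when an odd-length pbuf has flipped the accumulator's byte significance. As a
quick illustration of why folding twice always suffices, here is a small
standalone sketch; it is not part of the patch, the macros are reproduced with
extra parentheses for safety, and the test values are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the macros the patch adds to inet_chksum.c, written with
     * extra parentheses here for safety. */
    #define FOLD_U32T(u)          (((u) >> 16) + ((u) & 0x0000ffffUL))
    #define SWAP_BYTES_IN_WORD(w) ((((w) & 0xff) << 8) | (((w) & 0xff00) >> 8))

    int main(void)
    {
        /* Worst case: every bit set.  One fold can still leave a carry in the
         * upper half; two folds always reduce the sum to 16 bits. */
        uint32_t acc = 0xffffffffUL;

        acc = FOLD_U32T(acc);   /* 0x0001fffe - one carry still pending */
        acc = FOLD_U32T(acc);   /* 0x0000ffff - a proper 16-bit sum */

        printf("folded sum: 0x%04x\n", (unsigned)acc);
        printf("SWAP_BYTES_IN_WORD(0x1234) = 0x%04x\n",
               (unsigned)SWAP_BYTES_IN_WORD(0x1234u));
        return 0;
    }

The first fold of a 32-bit accumulator can yield at most 0xffff + 0xffff =
0x1fffe, so a second fold is guaranteed to fit in 16 bits, which is why the
patched code can drop both the loop and the branch.
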
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/mini-os_udivmoddi4-gcc7.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/mini-os_udivmoddi4-gcc7.patch
new file mode 100644
index 000000000..a3745bda6
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/mini-os_udivmoddi4-gcc7.patch
@@ -0,0 +1,43 @@
+From d991bdbc062248221511ecb795617c36b37e1d2e Mon Sep 17 00:00:00 2001
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Wed, 9 Aug 2017 13:15:48 +0100
+Subject: [PATCH] lib/math.c: implement __udivmoddi4
+
+Some code compiled by gcc 7 requires this.
+
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+---
+ lib/math.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/lib/math.c b/lib/math.c
+index 561393e..b98cc1d 100644
+--- a/lib/math.c
++++ b/lib/math.c
+@@ -6,6 +6,7 @@
+ * File: math.c
+ * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
+ * Changes:
++ * Implement __udivmoddi4 (Wei Liu <wei.liu2@citrix.com>)
+ *
+ * Date: Aug 2003
+ *
+@@ -397,6 +398,15 @@ __umoddi3(u_quad_t a, u_quad_t b)
+ }
+
+ /*
++ * Returns the quotient and places remainder in r
++ */
++u_quad_t
++__udivmoddi4(u_quad_t a, u_quad_t b, u_quad_t *r)
++{
++ return __qdivrem(a, b, r);
++}
++
++/*
+ * From
+ * moddi3.c
+ */
+--
+2.11.0
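
gcc 7 emits calls to __udivmoddi4 for some 64-bit divisions, and the mini-os
patch above provides that symbol by delegating to the __qdivrem helper already
present in lib/math.c. The expected contract is simply: return the quotient
and, when the pointer is non-NULL, store the remainder through it. A minimal
reference sketch of that contract, using plain C division rather than
__qdivrem and a made-up test value, purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Reference behaviour of __udivmoddi4: return a / b and, if r is
     * non-NULL, store a % b through it.  The real patch reuses mini-os's
     * existing __qdivrem instead of plain C division. */
    static uint64_t udivmoddi4_ref(uint64_t a, uint64_t b, uint64_t *r)
    {
        if (r != NULL)
            *r = a % b;
        return a / b;
    }

    int main(void)
    {
        uint64_t rem = 0;
        uint64_t quot = udivmoddi4_ref(1000000007ULL, 13ULL, &rem);

        /* 1000000007 = 13 * 76923077 + 6 */
        printf("quot=%llu rem=%llu\n",
               (unsigned long long)quot, (unsigned long long)rem);
        return 0;
    }
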
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/newlib-chk.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/newlib-chk.patch
new file mode 100644
index 000000000..a5d014958
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/newlib-chk.patch
@@ -0,0 +1,155 @@
+--- a/newlib/libc/stdio/fprintf_chk.c 1969-12-31 19:00:00.000000000 -0500
++++ b/newlib/libc/stdio/fprintf_chk.c 2009-02-26 19:02:53.000000000 -0500
+@@ -0,0 +1,21 @@
++#include <stdarg.h>
++#include <stdio.h>
++
++/*
++ * Stub implementation of __fprintf_chk adapted from glibc 2.7. This
++ * doesn't actually implement any buffer overflow protection. It just makes
++ * the linker happy :)
++*/
++int
++__fprintf_chk (FILE *fp, int flag, const char *format, ...)
++{
++ va_list ap;
++ int done;
++
++ va_start (ap, format);
++ done = vfprintf (fp, format, ap);
++ va_end (ap);
++
++ return done;
++}
++
+--- a/newlib/libc/stdio/Makefile.am 2007-08-02 16:23:06.000000000 -0400
++++ b/newlib/libc/stdio/Makefile.am 2009-02-26 18:14:53.000000000 -0500
+@@ -20,6 +20,7 @@
+ flags.c \
+ fopen.c \
+ fprintf.c \
++ fprintf_chk.c \
+ fputc.c \
+ fputs.c \
+ fread.c \
+@@ -65,6 +66,7 @@
+ sniprintf.c \
+ snprintf.c \
+ sprintf.c \
++ sprintf_chk.c \
+ sscanf.c \
+ stdio.c \
+ tmpfile.c \
+--- a/newlib/libc/stdio/Makefile.in 2007-12-19 17:36:38.000000000 -0500
++++ b/newlib/libc/stdio/Makefile.in 2009-02-26 18:43:52.000000000 -0500
+@@ -63,7 +63,8 @@
+ lib_a-fgets.$(OBJEXT) lib_a-fileno.$(OBJEXT) \
+ lib_a-findfp.$(OBJEXT) lib_a-fiprintf.$(OBJEXT) \
+ lib_a-flags.$(OBJEXT) lib_a-fopen.$(OBJEXT) \
+- lib_a-fprintf.$(OBJEXT) lib_a-fputc.$(OBJEXT) \
++ lib_a-fprintf.$(OBJEXT) lib_a-fprintf_chk.$(OBJEXT) \
++ lib_a-fputc.$(OBJEXT) \
+ lib_a-fputs.$(OBJEXT) lib_a-fread.$(OBJEXT) \
+ lib_a-freopen.$(OBJEXT) lib_a-fscanf.$(OBJEXT) \
+ lib_a-fiscanf.$(OBJEXT) lib_a-fseek.$(OBJEXT) \
+@@ -86,6 +87,7 @@
+ lib_a-setvbuf.$(OBJEXT) lib_a-siprintf.$(OBJEXT) \
+ lib_a-siscanf.$(OBJEXT) lib_a-sniprintf.$(OBJEXT) \
+ lib_a-snprintf.$(OBJEXT) lib_a-sprintf.$(OBJEXT) \
++ lib_a-sprintf_chk.$(OBJEXT) \
+ lib_a-sscanf.$(OBJEXT) lib_a-stdio.$(OBJEXT) \
+ lib_a-tmpfile.$(OBJEXT) lib_a-tmpnam.$(OBJEXT) \
+ lib_a-ungetc.$(OBJEXT) lib_a-vdiprintf.$(OBJEXT) \
+@@ -122,15 +124,15 @@
+ LTLIBRARIES = $(noinst_LTLIBRARIES)
+ am__objects_4 = clearerr.lo fclose.lo fdopen.lo feof.lo ferror.lo \
+ fflush.lo fgetc.lo fgetpos.lo fgets.lo fileno.lo findfp.lo \
+- fiprintf.lo flags.lo fopen.lo fprintf.lo fputc.lo fputs.lo \
+- fread.lo freopen.lo fscanf.lo fiscanf.lo fseek.lo fsetpos.lo \
++ fiprintf.lo flags.lo fopen.lo fprintf.lo fprintf_chk.lo fputc.lo \
++ fputs.lo fread.lo freopen.lo fscanf.lo fiscanf.lo fseek.lo fsetpos.lo \
+ ftell.lo fvwrite.lo fwalk.lo fwrite.lo getc.lo getchar.lo \
+ getc_u.lo getchar_u.lo getdelim.lo getline.lo gets.lo \
+ iprintf.lo iscanf.lo makebuf.lo perror.lo printf.lo putc.lo \
+ putchar.lo putc_u.lo putchar_u.lo puts.lo refill.lo remove.lo \
+ rename.lo rewind.lo rget.lo scanf.lo sccl.lo setbuf.lo \
+ setbuffer.lo setlinebuf.lo setvbuf.lo siprintf.lo siscanf.lo \
+- sniprintf.lo snprintf.lo sprintf.lo sscanf.lo stdio.lo \
++ sniprintf.lo snprintf.lo sprintf.lo sprintf_chk.lo sscanf.lo stdio.lo \
+ tmpfile.lo tmpnam.lo ungetc.lo vdiprintf.lo vdprintf.lo \
+ viprintf.lo viscanf.lo vprintf.lo vscanf.lo vsiprintf.lo \
+ vsiscanf.lo vsnprintf.lo vsniprintf.lo vsprintf.lo vsscanf.lo \
+@@ -344,6 +346,7 @@
+ flags.c \
+ fopen.c \
+ fprintf.c \
++ fprintf_chk.c \
+ fputc.c \
+ fputs.c \
+ fread.c \
+@@ -389,6 +392,7 @@
+ sniprintf.c \
+ snprintf.c \
+ sprintf.c \
++ sprintf_chk.c \
+ sscanf.c \
+ stdio.c \
+ tmpfile.c \
+@@ -508,6 +512,7 @@
+ siprintf.def \
+ siscanf.def \
+ sprintf.def \
++ sprintf_chk.def \
+ sscanf.def \
+ tmpfile.def \
+ tmpnam.def \
+@@ -678,6 +683,12 @@
+ lib_a-fprintf.obj: fprintf.c
+ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-fprintf.obj `if test -f 'fprintf.c'; then $(CYGPATH_W) 'fprintf.c'; else $(CYGPATH_W) '$(srcdir)/fprintf.c'; fi`
+
++lib_a-fprintf_chk.o: fprintf_chk.c
++ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-fprintf_chk.o `test -f 'fprintf_chk.c' || echo '$(srcdir)/'`fprintf_chk.c
++
++lib_a-fprintf_chk.obj: fprintf_chk.c
++ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-fprintf_chk.obj `if test -f 'fprintf_chk.c'; then $(CYGPATH_W) 'fprintf_chk.c'; else $(CYGPATH_W) '$(srcdir)/fprintf_chk.c'; fi`
++
+ lib_a-fputc.o: fputc.c
+ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-fputc.o `test -f 'fputc.c' || echo '$(srcdir)/'`fputc.c
+
+@@ -948,6 +959,12 @@
+ lib_a-sprintf.obj: sprintf.c
+ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-sprintf.obj `if test -f 'sprintf.c'; then $(CYGPATH_W) 'sprintf.c'; else $(CYGPATH_W) '$(srcdir)/sprintf.c'; fi`
+
++lib_a-sprintf_chk.o: sprintf_chk.c
++ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-sprintf_chk.o `test -f 'sprintf_chk.c' || echo '$(srcdir)/'`sprintf_chk.c
++
++lib_a-sprintf_chk.obj: sprintf_chk.c
++ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-sprintf_chk.obj `if test -f 'sprintf_chk.c'; then $(CYGPATH_W) 'sprintf_chk.c'; else $(CYGPATH_W) '$(srcdir)/sprintf_chk.c'; fi`
++
+ lib_a-sscanf.o: sscanf.c
+ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-sscanf.o `test -f 'sscanf.c' || echo '$(srcdir)/'`sscanf.c
+
+--- a/newlib/libc/stdio/sprintf_chk.c 1969-12-31 19:00:00.000000000 -0500
++++ b/newlib/libc/stdio/sprintf_chk.c 2009-02-26 19:02:26.000000000 -0500
+@@ -0,0 +1,21 @@
++#include <stdarg.h>
++#include <stdio.h>
++
++/*
++ * Stub implementation of __sprintf_chk adapted from glibc 2.7. This
++ * doesn't actually implement any buffer overflow protection. It just makes
++ * the linker happy :)
++*/
++int
++__sprintf_chk (char *s, int flags, size_t slen, const char *format, ...)
++{
++ va_list arg;
++ int done;
++
++ va_start (arg, format);
++ done = vsprintf (s, format, arg);
++ va_end (arg);
++
++ return done;
++}
++
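
Both stubs above exist because a toolchain that enables _FORTIFY_SOURCE (as
several distributions do by default) rewrites fprintf/sprintf call sites into
__fprintf_chk/__sprintf_chk, and newlib 1.16 does not provide those symbols;
the stubs just forward to the unchecked variants, so no overflow checking
actually happens. Roughly what such a rewritten call site looks like, as a
hand-written illustration rather than generated code, with the prototype
copied from the stub above:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Prototype matching the stub added by the patch above; on an ordinary
     * glibc host this resolves to the C library's real __sprintf_chk. */
    extern int __sprintf_chk(char *s, int flag, size_t slen,
                             const char *format, ...);

    int main(void)
    {
        char buf[32];

        /* With _FORTIFY_SOURCE, `sprintf(buf, "id=%d", 42)` compiles into
         * roughly this call; with the newlib stub it behaves like sprintf. */
        __sprintf_chk(buf, 0, sizeof(buf), "id=%d", 42);
        printf("%s (len=%zu)\n", buf, strlen(buf));
        return 0;
    }
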
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/newlib-stdint-size_max-fix-from-1.17.0.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/newlib-stdint-size_max-fix-from-1.17.0.patch
new file mode 100644
index 000000000..3610d646d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/newlib-stdint-size_max-fix-from-1.17.0.patch
@@ -0,0 +1,16 @@
+--- a/newlib/libc/include/stdint.h.orig 2006-08-17 00:39:43.000000000 +0300
++++ b/newlib/libc/include/stdint.h 2009-08-25 17:33:23.000000000 +0300
+@@ -348,8 +348,11 @@
+ #endif
+
+ /* This must match size_t in stddef.h, currently long unsigned int */
+-#define SIZE_MIN (-__STDINT_EXP(LONG_MAX) - 1L)
+-#define SIZE_MAX __STDINT_EXP(LONG_MAX)
++#ifdef __SIZE_MAX__
++#define SIZE_MAX __SIZE_MAX__
++#else
++#define SIZE_MAX (__STDINT_EXP(LONG_MAX) * 2UL + 1)
++#endif
+
+ /* This must match sig_atomic_t in <signal.h> (currently int) */
+ #define SIG_ATOMIC_MIN (-__STDINT_EXP(INT_MAX) - 1)
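
The change above matters because newlib 1.16 defined SIZE_MAX as LONG_MAX (and
shipped a meaningless signed SIZE_MIN); size_t is unsigned, so SIZE_MAX must
equal (size_t)-1. With the fix, the value comes from the compiler's
__SIZE_MAX__ when available and is otherwise derived from LONG_MAX as an
unsigned expression. The property being restored can be checked with a tiny
standalone program (illustrative only, not part of the patch):

    #include <limits.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* SIZE_MAX must be the largest value a size_t can hold, i.e.
         * (size_t)-1; the old definition made it equal to LONG_MAX, which is
         * only about half of that. */
        printf("SIZE_MAX   = %llu\n", (unsigned long long)SIZE_MAX);
        printf("(size_t)-1 = %llu\n", (unsigned long long)(size_t)-1);
        printf("LONG_MAX   = %llu\n", (unsigned long long)LONG_MAX);

        return SIZE_MAX == (size_t)-1 ? 0 : 1;
    }
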
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/newlib.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/newlib.patch
new file mode 100644
index 000000000..dbf409a2d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/newlib.patch
@@ -0,0 +1,727 @@
+There is a mix between longs and long longs.
+
+Index: newlib/libc/include/inttypes.h
+===================================================================
+RCS file: /cvs/src/src/newlib/libc/include/inttypes.h,v
+retrieving revision 1.3
+diff -u -p -r1.3 inttypes.h
+--- a/newlib/libc/include/inttypes.h 16 Dec 2005 19:03:12 -0000 1.3
++++ b/newlib/libc/include/inttypes.h 8 Nov 2007 16:32:44 -0000
+@@ -163,12 +163,12 @@
+
+
+ /* 64-bit types */
+-#if __have_longlong64
+-#define __PRI64(x) __STRINGIFY(ll##x)
+-#define __SCN64(x) __STRINGIFY(ll##x)
+-#elif __have_long64
++#if __have_long64
+ #define __PRI64(x) __STRINGIFY(l##x)
+ #define __SCN64(x) __STRINGIFY(l##x)
++#elif __have_longlong64
++#define __PRI64(x) __STRINGIFY(ll##x)
++#define __SCN64(x) __STRINGIFY(ll##x)
+ #else
+ #define __PRI64(x) __STRINGIFY(x)
+ #define __SCN64(x) __STRINGIFY(x)
+@@ -217,12 +217,12 @@
+ #endif
+
+ /* max-bit types */
+-#if __have_longlong64
+-#define __PRIMAX(x) __STRINGIFY(ll##x)
+-#define __SCNMAX(x) __STRINGIFY(ll##x)
+-#elif __have_long64
++#if __have_long64
+ #define __PRIMAX(x) __STRINGIFY(l##x)
+ #define __SCNMAX(x) __STRINGIFY(l##x)
++#elif __have_longlong64
++#define __PRIMAX(x) __STRINGIFY(ll##x)
++#define __SCNMAX(x) __STRINGIFY(ll##x)
+ #else
+ #define __PRIMAX(x) __STRINGIFY(x)
+ #define __SCNMAX(x) __STRINGIFY(x)
+@@ -242,12 +242,12 @@
+ #define SCNxMAX __SCNMAX(x)
+
+ /* ptr types */
+-#if __have_longlong64
+-#define __PRIPTR(x) __STRINGIFY(ll##x)
+-#define __SCNPTR(x) __STRINGIFY(ll##x)
+-#elif __have_long64
++#if __have_long64
+ #define __PRIPTR(x) __STRINGIFY(l##x)
+ #define __SCNPTR(x) __STRINGIFY(l##x)
++#elif __have_longlong64
++#define __PRIPTR(x) __STRINGIFY(ll##x)
++#define __SCNPTR(x) __STRINGIFY(ll##x)
+ #else
+ #define __PRIPTR(x) __STRINGIFY(x)
+ #define __SCNPTR(x) __STRINGIFY(x)
+
+We don't want u?int32_t to be long, as our code assumes in a lot of places that
+it is int.
+
+Index: newlib/libc/include/stdint.h
+===================================================================
+RCS file: /cvs/src/src/newlib/libc/include/stdint.h,v
+retrieving revision 1.10
+diff -u -p -r1.10 stdint.h
+--- a/newlib/libc/include/stdint.h 16 Aug 2006 21:39:43 -0000 1.10
++++ b/newlib/libc/include/stdint.h 12 Feb 2008 13:07:52 -0000
+@@ -38,7 +38,7 @@ extern "C" {
+ #if __STDINT_EXP(LONG_MAX) > 0x7fffffff
+ #define __have_long64 1
+ #elif __STDINT_EXP(LONG_MAX) == 0x7fffffff && !defined(__SPU__)
+-#define __have_long32 1
++/* #define __have_long32 1 */
+ #endif
+
+ #if __STDINT_EXP(SCHAR_MAX) == 0x7f
+
+Make newlib use a dynamic reentrancy pointer (define __DYNAMIC_REENT__).
+
+Index: newlib/libc/include/sys/config.h
+===================================================================
+RCS file: /cvs/src/src/newlib/libc/include/sys/config.h,v
+retrieving revision 1.47
+diff -u -p -r1.47 config.h
+--- a/newlib/libc/include/sys/config.h 15 Mar 2007 21:32:12 -0000 1.47
++++ b/newlib/libc/include/sys/config.h 8 Nov 2007 16:32:44 -0000
+@@ -71,6 +71,10 @@
+ #endif
+ #endif
+
++#ifndef __DYNAMIC_REENT__
++#define __DYNAMIC_REENT__
++#endif
++
+ #ifdef __mn10200__
+ #define __SMALL_BITFIELDS
+ #endif
+
+Dynamic pointer to our reentrancy zone
+
+Index: newlib/libc/reent/getreent.c
+===================================================================
+RCS file: /cvs/src/src/newlib/libc/reent/getreent.c,v
+retrieving revision 1.2
+diff -u -p -r1.2 getreent.c
+--- a/newlib/libc/reent/getreent.c 7 Sep 2007 00:45:55 -0000 1.2
++++ b/newlib/libc/reent/getreent.c 8 Nov 2007 16:32:44 -0000
+@@ -3,12 +3,20 @@
+ #include <_ansi.h>
+ #include <reent.h>
+
++#define weak_alias(name, aliasname) \
++ extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));
++
+ #ifdef __getreent
+ #undef __getreent
+ #endif
++#ifdef __libc_getreent
++#undef __libc_getreent
++#endif
+
+ struct _reent *
+-_DEFUN_VOID(__getreent)
++__libc_getreent (void)
+ {
+ return _impure_ptr;
+ }
++weak_alias(__libc_getreent,__getreent)
++
+
+We can't provide a red zone in mini-os.
+
+Index: newlib/libc/machine/x86_64/memcpy.S
+===================================================================
+RCS file: /cvs/src/src/newlib/libc/machine/x86_64/memcpy.S,v
+retrieving revision 1.1
+diff -u -p -r1.1 memcpy.S
+--- a/newlib/libc/machine/x86_64/memcpy.S 28 Aug 2007 21:56:49 -0000 1.1
++++ b/newlib/libc/machine/x86_64/memcpy.S 8 Nov 2007 16:32:44 -0000
+@@ -30,10 +30,18 @@ quadword_aligned:
+ cmpq $256, rdx
+ jb quadword_copy
+
++#if 1
++ subq $32, rsp
++ movq rax, 24 (rsp)
++ movq r12, 16 (rsp)
++ movq r13, 8 (rsp)
++ movq r14, 0 (rsp)
++#else
+ movq rax, -8 (rsp)
+ movq r12, -16 (rsp)
+ movq r13, -24 (rsp)
+ movq r14, -32 (rsp)
++#endif
+
+ movq rdx, rcx /* Copy 128 bytes at a time with minimum cache polution */
+ shrq $7, rcx
+@@ -89,10 +97,18 @@ loop:
+ movq rdx, rcx
+ andq $127, rcx
+ rep movsb
++#if 1
++ movq 24 (rsp), rax
++ movq 16 (rsp), r12
++ movq 8 (rsp), r13
++ movq 0 (rsp), r14
++ addq $32, rsp
++#else
+ movq -8 (rsp), rax
+ movq -16 (rsp), r12
+ movq -24 (rsp), r13
+ movq -32 (rsp), r14
++#endif
+ ret
+
+
+--- a/newlib/libc/machine/x86_64/x86_64mach.h.orig 2008-07-11 14:57:23.062269000 +0100
++++ b/newlib/libc/machine/x86_64/x86_64mach.h 2008-07-11 14:58:01.262503000 +0100
+@@ -22,81 +22,81 @@
+
+ #define REG(x) CONCAT1(__REG_PREFIX__, x)
+
+-#define rax REG(rax)
+-#define rbx REG(rbx)
+-#define rcx REG(rcx)
+-#define rdx REG(rdx)
+-#define rsi REG(rsi)
+-#define rdi REG(rdi)
+-#define rbp REG(rbp)
+-#define rsp REG(rsp)
+-
+-#define r8 REG(r8)
+-#define r9 REG(r9)
+-#define r10 REG(r10)
+-#define r11 REG(r11)
+-#define r12 REG(r12)
+-#define r13 REG(r13)
+-#define r14 REG(r14)
+-#define r15 REG(r15)
+-
+-#define eax REG(eax)
+-#define ebx REG(ebx)
+-#define ecx REG(ecx)
+-#define edx REG(edx)
+-#define esi REG(esi)
+-#define edi REG(edi)
+-#define ebp REG(ebp)
+-#define esp REG(esp)
+-
+-#define st0 REG(st)
+-#define st1 REG(st(1))
+-#define st2 REG(st(2))
+-#define st3 REG(st(3))
+-#define st4 REG(st(4))
+-#define st5 REG(st(5))
+-#define st6 REG(st(6))
+-#define st7 REG(st(7))
+-
+-#define ax REG(ax)
+-#define bx REG(bx)
+-#define cx REG(cx)
+-#define dx REG(dx)
+-
+-#define ah REG(ah)
+-#define bh REG(bh)
+-#define ch REG(ch)
+-#define dh REG(dh)
+-
+-#define al REG(al)
+-#define bl REG(bl)
+-#define cl REG(cl)
+-#define dl REG(dl)
+-
+-#define sil REG(sil)
+-
+-#define mm1 REG(mm1)
+-#define mm2 REG(mm2)
+-#define mm3 REG(mm3)
+-#define mm4 REG(mm4)
+-#define mm5 REG(mm5)
+-#define mm6 REG(mm6)
+-#define mm7 REG(mm7)
+-
+-#define xmm0 REG(xmm0)
+-#define xmm1 REG(xmm1)
+-#define xmm2 REG(xmm2)
+-#define xmm3 REG(xmm3)
+-#define xmm4 REG(xmm4)
+-#define xmm5 REG(xmm5)
+-#define xmm6 REG(xmm6)
+-#define xmm7 REG(xmm7)
+-
+-#define cr0 REG(cr0)
+-#define cr1 REG(cr1)
+-#define cr2 REG(cr2)
+-#define cr3 REG(cr3)
+-#define cr4 REG(cr4)
++#define rax %rax
++#define rbx %rbx
++#define rcx %rcx
++#define rdx %rdx
++#define rsi %rsi
++#define rdi %rdi
++#define rbp %rbp
++#define rsp %rsp
++
++#define r8 %r8
++#define r9 %r9
++#define r10 %r10
++#define r11 %r11
++#define r12 %r12
++#define r13 %r13
++#define r14 %r14
++#define r15 %r15
++
++#define eax %eax
++#define ebx %ebx
++#define ecx %ecx
++#define edx %edx
++#define esi %esi
++#define edi %edi
++#define ebp %ebp
++#define esp %esp
++
++#define st0 %st
++#define st1 %st(1)
++#define st2 %st(2)
++#define st3 %st(3)
++#define st4 %st(4)
++#define st5 %st(5)
++#define st6 %st(6)
++#define st7 %st(7)
++
++#define ax %ax
++#define bx %bx
++#define cx %cx
++#define dx %dx
++
++#define ah %ah
++#define bh %bh
++#define ch %ch
++#define dh %dh
++
++#define al %al
++#define bl %bl
++#define cl %cl
++#define dl %dl
++
++#define sil %sil
++
++#define mm1 %mm1
++#define mm2 %mm2
++#define mm3 %mm3
++#define mm4 %mm4
++#define mm5 %mm5
++#define mm6 %mm6
++#define mm7 %mm7
++
++#define xmm0 %xmm0
++#define xmm1 %xmm1
++#define xmm2 %xmm2
++#define xmm3 %xmm3
++#define xmm4 %xmm4
++#define xmm5 %xmm5
++#define xmm6 %xmm6
++#define xmm7 %xmm7
++
++#define cr0 %cr0
++#define cr1 %cr1
++#define cr2 %cr2
++#define cr3 %cr3
++#define cr4 %cr4
+
+ #ifdef _I386MACH_NEED_SOTYPE_FUNCTION
+ #define SOTYPE_FUNCTION(sym) .type SYM(sym),@function
+--- a/newlib/libc/machine/x86_64/memcpy.S.orig 2008-07-11 15:12:27.494693000 +0100
++++ b/newlib/libc/machine/x86_64/memcpy.S 2008-07-11 15:12:29.448706000 +0100
+@@ -60,14 +60,14 @@
+ movq 48 (rsi), r13
+ movq 56 (rsi), r14
+
+- movntiq rax, (rdi)
+- movntiq r8 , 8 (rdi)
+- movntiq r9 , 16 (rdi)
+- movntiq r10, 24 (rdi)
+- movntiq r11, 32 (rdi)
+- movntiq r12, 40 (rdi)
+- movntiq r13, 48 (rdi)
+- movntiq r14, 56 (rdi)
++ movnti rax, (rdi)
++ movnti r8 , 8 (rdi)
++ movnti r9 , 16 (rdi)
++ movnti r10, 24 (rdi)
++ movnti r11, 32 (rdi)
++ movnti r12, 40 (rdi)
++ movnti r13, 48 (rdi)
++ movnti r14, 56 (rdi)
+
+ movq 64 (rsi), rax
+ movq 72 (rsi), r8
+@@ -78,14 +78,14 @@
+ movq 112 (rsi), r13
+ movq 120 (rsi), r14
+
+- movntiq rax, 64 (rdi)
+- movntiq r8 , 72 (rdi)
+- movntiq r9 , 80 (rdi)
+- movntiq r10, 88 (rdi)
+- movntiq r11, 96 (rdi)
+- movntiq r12, 104 (rdi)
+- movntiq r13, 112 (rdi)
+- movntiq r14, 120 (rdi)
++ movnti rax, 64 (rdi)
++ movnti r8 , 72 (rdi)
++ movnti r9 , 80 (rdi)
++ movnti r10, 88 (rdi)
++ movnti r11, 96 (rdi)
++ movnti r12, 104 (rdi)
++ movnti r13, 112 (rdi)
++ movnti r14, 120 (rdi)
+
+ leaq 128 (rsi), rsi
+ leaq 128 (rdi), rdi
+--- a/newlib/libc/machine/i386/i386mach.h 2000-08-28 18:50:06.000000000 +0100
++++ b/newlib/libc/machine/i386/i386mach.h 2008-07-11 15:17:13.874409000 +0100
+@@ -27,46 +27,46 @@
+
+ #define REG(x) CONCAT1(__REG_PREFIX__, x)
+
+-#define eax REG(eax)
+-#define ebx REG(ebx)
+-#define ecx REG(ecx)
+-#define edx REG(edx)
+-#define esi REG(esi)
+-#define edi REG(edi)
+-#define ebp REG(ebp)
+-#define esp REG(esp)
+-
+-#define st0 REG(st)
+-#define st1 REG(st(1))
+-#define st2 REG(st(2))
+-#define st3 REG(st(3))
+-#define st4 REG(st(4))
+-#define st5 REG(st(5))
+-#define st6 REG(st(6))
+-#define st7 REG(st(7))
+-
+-#define ax REG(ax)
+-#define bx REG(bx)
+-#define cx REG(cx)
+-#define dx REG(dx)
+-
+-#define ah REG(ah)
+-#define bh REG(bh)
+-#define ch REG(ch)
+-#define dh REG(dh)
+-
+-#define al REG(al)
+-#define bl REG(bl)
+-#define cl REG(cl)
+-#define dl REG(dl)
+-
+-#define mm1 REG(mm1)
+-#define mm2 REG(mm2)
+-#define mm3 REG(mm3)
+-#define mm4 REG(mm4)
+-#define mm5 REG(mm5)
+-#define mm6 REG(mm6)
+-#define mm7 REG(mm7)
++#define eax %eax
++#define ebx %ebx
++#define ecx %ecx
++#define edx %edx
++#define esi %esi
++#define edi %edi
++#define ebp %ebp
++#define esp %esp
++
++#define st0 %st
++#define st1 %st(1)
++#define st2 %st(2)
++#define st3 %st(3)
++#define st4 %st(4)
++#define st5 %st(5)
++#define st6 %st(6)
++#define st7 %st(7)
++
++#define ax %ax
++#define bx %bx
++#define cx %cx
++#define dx %dx
++
++#define ah %ah
++#define bh %bh
++#define ch %ch
++#define dh %dh
++
++#define al %al
++#define bl %bl
++#define cl %cl
++#define dl %dl
++
++#define mm1 %mm1
++#define mm2 %mm2
++#define mm3 %mm3
++#define mm4 %mm4
++#define mm5 %mm5
++#define mm6 %mm6
++#define mm7 %mm7
+
+ #ifdef _I386MACH_NEED_SOTYPE_FUNCTION
+ #define SOTYPE_FUNCTION(sym) .type SYM(sym),@function
+--- a/newlib/libc/machine/x86_64/memset.S 2007-08-28 22:56:49.000000000 +0100
++++ b/newlib/libc/machine/x86_64/memset.S 2008-07-11 15:16:59.098320000 +0100
+@@ -40,22 +40,22 @@
+
+ .p2align 4
+ loop:
+- movntiq rax, (rdi)
+- movntiq rax, 8 (rdi)
+- movntiq rax, 16 (rdi)
+- movntiq rax, 24 (rdi)
+- movntiq rax, 32 (rdi)
+- movntiq rax, 40 (rdi)
+- movntiq rax, 48 (rdi)
+- movntiq rax, 56 (rdi)
+- movntiq rax, 64 (rdi)
+- movntiq rax, 72 (rdi)
+- movntiq rax, 80 (rdi)
+- movntiq rax, 88 (rdi)
+- movntiq rax, 96 (rdi)
+- movntiq rax, 104 (rdi)
+- movntiq rax, 112 (rdi)
+- movntiq rax, 120 (rdi)
++ movnti rax, (rdi)
++ movnti rax, 8 (rdi)
++ movnti rax, 16 (rdi)
++ movnti rax, 24 (rdi)
++ movnti rax, 32 (rdi)
++ movnti rax, 40 (rdi)
++ movnti rax, 48 (rdi)
++ movnti rax, 56 (rdi)
++ movnti rax, 64 (rdi)
++ movnti rax, 72 (rdi)
++ movnti rax, 80 (rdi)
++ movnti rax, 88 (rdi)
++ movnti rax, 96 (rdi)
++ movnti rax, 104 (rdi)
++ movnti rax, 112 (rdi)
++ movnti rax, 120 (rdi)
+
+ leaq 128 (rdi), rdi
+
+--- a/newlib/libm/machine/i386/i386mach.h.orig 2008-07-11 15:30:37.367227000 +0100
++++ b/newlib/libm/machine/i386/i386mach.h 2008-07-11 15:30:55.232337000 +0100
+@@ -27,46 +27,46 @@
+
+ #define REG(x) CONCAT1(__REG_PREFIX__, x)
+
+-#define eax REG(eax)
+-#define ebx REG(ebx)
+-#define ecx REG(ecx)
+-#define edx REG(edx)
+-#define esi REG(esi)
+-#define edi REG(edi)
+-#define ebp REG(ebp)
+-#define esp REG(esp)
+-
+-#define st0 REG(st)
+-#define st1 REG(st(1))
+-#define st2 REG(st(2))
+-#define st3 REG(st(3))
+-#define st4 REG(st(4))
+-#define st5 REG(st(5))
+-#define st6 REG(st(6))
+-#define st7 REG(st(7))
+-
+-#define ax REG(ax)
+-#define bx REG(bx)
+-#define cx REG(cx)
+-#define dx REG(dx)
+-
+-#define ah REG(ah)
+-#define bh REG(bh)
+-#define ch REG(ch)
+-#define dh REG(dh)
+-
+-#define al REG(al)
+-#define bl REG(bl)
+-#define cl REG(cl)
+-#define dl REG(dl)
+-
+-#define mm1 REG(mm1)
+-#define mm2 REG(mm2)
+-#define mm3 REG(mm3)
+-#define mm4 REG(mm4)
+-#define mm5 REG(mm5)
+-#define mm6 REG(mm6)
+-#define mm7 REG(mm7)
++#define eax %eax
++#define ebx %ebx
++#define ecx %ecx
++#define edx %edx
++#define esi %esi
++#define edi %edi
++#define ebp %ebp
++#define esp %esp
++
++#define st0 %st
++#define st1 %st(1)
++#define st2 %st(2)
++#define st3 %st(3)
++#define st4 %st(4)
++#define st5 %st(5)
++#define st6 %st(6)
++#define st7 %st(7)
++
++#define ax %ax
++#define bx %bx
++#define cx %cx
++#define dx %dx
++
++#define ah %ah
++#define bh %bh
++#define ch %ch
++#define dh %dh
++
++#define al %al
++#define bl %bl
++#define cl %cl
++#define dl %dl
++
++#define mm1 %mm1
++#define mm2 %mm2
++#define mm3 %mm3
++#define mm4 %mm4
++#define mm5 %mm5
++#define mm6 %mm6
++#define mm7 %mm7
+
+ #ifdef _I386MACH_NEED_SOTYPE_FUNCTION
+ #define SOTYPE_FUNCTION(sym) .type SYM(sym),@function
+
+
+We want a libc with 64-bit file offsets even on 32-bit platforms.
+
+--- ./newlib/configure.host.orig 2008-08-07 16:01:17.801946000 +0100
++++ ./newlib/configure.host 2008-08-07 16:01:34.181064000 +0100
+@@ -317,6 +317,8 @@
+ oext=lo
+ lpfx=
+ aext=la ;;
++ i[34567]86-xen-elf)
++ stdio64_dir=stdio64 ;;
+ *) ;; #shared library not supported for ${host}
+ esac
+
+--- a/newlib/libc/include/sys/_types.h.orig 2008-08-07 15:22:44.925008000 +0100
++++ b/newlib/libc/include/sys/_types.h 2008-08-07 15:22:50.824044000 +0100
+@@ -13,8 +13,12 @@
+ #include <sys/lock.h>
+
+ #ifndef __off_t_defined
++#ifdef __MINIOS__
++typedef long long _off_t;
++#else
+ typedef long _off_t;
+ #endif
++#endif
+
+ #if defined(__rtems__)
+ /* device numbers are 32-bit major and and 32-bit minor */
+--- ./newlib/libc/include/sys/config.h.orig 2008-08-07 14:43:25.915866000 +0100
++++ ./newlib/libc/include/sys/config.h 2008-08-07 14:44:13.508154000 +0100
+@@ -69,6 +69,10 @@
+ /* we use some glibc header files so turn on glibc large file feature */
+ #define _LARGEFILE64_SOURCE 1
+ #endif
++#ifdef __MINIOS__
++#define __LARGE64_FILES 1
++#define _LARGEFILE64_SOURCE 1
++#endif
+ #endif
+
+ #ifndef __DYNAMIC_REENT__
+--- ./newlib/libc/include/sys/_default_fcntl.h.orig 2008-08-07 15:08:22.377836000 +0100
++++ ./newlib/libc/include/sys/_default_fcntl.h 2008-08-07 15:08:31.651890000 +0100
+@@ -170,7 +170,11 @@
+ /* Provide _<systemcall> prototypes for functions provided by some versions
+ of newlib. */
+ #ifdef _COMPILING_NEWLIB
+-extern int _open _PARAMS ((const char *, int, ...));
++extern int _open _PARAMS ((const char *, int, ...))
++#ifdef __MINIOS__
++ asm("open64")
++#endif
++ ;
+ extern int _fcntl _PARAMS ((int, int, ...));
+ #ifdef __LARGE64_FILES
+ extern int _open64 _PARAMS ((const char *, int, ...));
+--- ./newlib/libc/include/sys/unistd.h.orig 2008-08-07 15:09:36.449280000 +0100
++++ ./newlib/libc/include/sys/unistd.h 2008-08-07 15:09:51.210370000 +0100
+@@ -101,7 +101,11 @@
+ int _EXFUN(link, (const char *__path1, const char *__path2 ));
+ int _EXFUN(nice, (int __nice_value ));
+ #if !defined(__INSIDE_CYGWIN__)
+-off_t _EXFUN(lseek, (int __fildes, off_t __offset, int __whence ));
++off_t _EXFUN(lseek, (int __fildes, off_t __offset, int __whence ))
++#ifdef __MINIOS__
++ asm("lseek64")
++#endif
++ ;
+ #endif
+ #if defined(__SPU__)
+ #define F_ULOCK 0
+--- ./newlib/libc/include/sys/stat.h.orig 2008-08-07 16:08:50.495116000 +0100
++++ ./newlib/libc/include/sys/stat.h 2008-08-07 16:10:21.799753000 +0100
+@@ -49,6 +49,9 @@
+ long st_spare4[2];
+ #endif
+ };
++#ifdef __MINIOS__
++#define stat64 stat
++#endif
+ #endif
+
+ #define _IFMT 0170000 /* type of file */
+@@ -132,7 +135,11 @@
+ /* Provide prototypes for most of the _<systemcall> names that are
+ provided in newlib for some compilers. */
+ #ifdef _COMPILING_NEWLIB
+-int _EXFUN(_fstat,( int __fd, struct stat *__sbuf ));
++int _EXFUN(_fstat,( int __fd, struct stat *__sbuf ))
++#ifdef __MINIOS__
++ asm("fstat64")
++#endif
++ ;
+ int _EXFUN(_stat,( const char *__path, struct stat *__sbuf ));
+ #ifdef __LARGE64_FILES
+ struct stat64;
+--- ./newlib/libc/include/_syslist.h.orig 2008-08-07 16:24:19.122605000 +0100
++++ ./newlib/libc/include/_syslist.h 2008-08-07 16:24:21.548628000 +0100
+@@ -14,6 +14,7 @@
+ #define _kill kill
+ #define _link link
+ #define _lseek lseek
++#define _lseek64 lseek64
+ #define _open open
+ #define _read read
+ #define _sbrk sbrk
+--- a/newlib/libc/include/reent.h.orig 2008-08-07 16:28:49.846502000 +0100
++++ b/newlib/libc/include/reent.h 2008-08-07 16:29:02.096586000 +0100
+@@ -87,6 +87,9 @@
+ #if defined(__CYGWIN__) && defined(_COMPILING_NEWLIB)
+ #define stat64 __stat64
+ #endif
++#if defined(__MINIOS__)
++#define stat64 stat
++#endif
+
+ struct stat64;
+
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/polarssl.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/polarssl.patch
new file mode 100644
index 000000000..95487308c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/polarssl.patch
@@ -0,0 +1,64 @@
+diff -Naur polarssl-1.1.4/include/polarssl/config.h polarssl-x86_64/include/polarssl/config.h
+--- a/include/polarssl/config.h 2011-12-22 05:06:27.000000000 -0500
++++ b/include/polarssl/config.h 2012-10-30 17:18:07.567001000 -0400
+@@ -164,8 +164,8 @@
+ * application.
+ *
+ * Uncomment this macro to prevent loading of default entropy functions.
+-#define POLARSSL_NO_DEFAULT_ENTROPY_SOURCES
+ */
++#define POLARSSL_NO_DEFAULT_ENTROPY_SOURCES
+
+ /**
+ * \def POLARSSL_NO_PLATFORM_ENTROPY
+@@ -175,8 +175,8 @@
+ * standards like the /dev/urandom or Windows CryptoAPI.
+ *
+ * Uncomment this macro to disable the built-in platform entropy functions.
+-#define POLARSSL_NO_PLATFORM_ENTROPY
+ */
++#define POLARSSL_NO_PLATFORM_ENTROPY
+
+ /**
+ * \def POLARSSL_PKCS1_V21
+@@ -426,8 +426,8 @@
+ * Requires: POLARSSL_TIMING_C
+ *
+ * This module enables the HAVEGE random number generator.
+- */
+ #define POLARSSL_HAVEGE_C
++ */
+
+ /**
+ * \def POLARSSL_MD_C
+@@ -490,7 +490,7 @@
+ *
+ * This module provides TCP/IP networking routines.
+ */
+-#define POLARSSL_NET_C
++//#define POLARSSL_NET_C
+
+ /**
+ * \def POLARSSL_PADLOCK_C
+@@ -644,8 +644,8 @@
+ * Caller: library/havege.c
+ *
+ * This module is used by the HAVEGE random number generator.
+- */
+ #define POLARSSL_TIMING_C
++ */
+
+ /**
+ * \def POLARSSL_VERSION_C
+diff -Naur polarssl-1.1.4/library/bignum.c polarssl-x86_64/library/bignum.c
+--- a/library/bignum.c 2012-04-29 16:15:55.000000000 -0400
++++ b/library/bignum.c 2012-10-30 17:21:52.135000999 -0400
+@@ -1101,7 +1101,7 @@
+ Z.p[i - t - 1] = ~0;
+ else
+ {
+-#if defined(POLARSSL_HAVE_LONGLONG)
++#if 0 //defined(POLARSSL_HAVE_LONGLONG)
+ t_udbl r;
+
+ r = (t_udbl) X.p[i] << biL;
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/tpmemu-0.7.4.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/tpmemu-0.7.4.patch
new file mode 100644
index 000000000..622b34f59
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/tpmemu-0.7.4.patch
@@ -0,0 +1,12 @@
+diff -Naur tpm_emulator-x86_64-back/tpm/tpm_emulator_extern.c tpm_emulator-x86_64/tpm/tpm_emulator_extern.c
+--- a/tpm/tpm_emulator_extern.c 2012-04-27 10:55:46.581963398 -0400
++++ b/tpm/tpm_emulator_extern.c 2012-04-27 10:56:02.193034152 -0400
+@@ -249,7 +249,7 @@
+ #else /* TPM_NO_EXTERN */
+
+ int (*tpm_extern_init)(void) = NULL;
+-int (*tpm_extern_release)(void) = NULL;
++void (*tpm_extern_release)(void) = NULL;
+ void* (*tpm_malloc)(size_t size) = NULL;
+ void (*tpm_free)(/*const*/ void *ptr) = NULL;
+ void (*tpm_log)(int priority, const char *fmt, ...) = NULL;
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-bufsize.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-bufsize.patch
new file mode 100644
index 000000000..9c9304cff
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-bufsize.patch
@@ -0,0 +1,13 @@
+diff --git a/config.h.in b/config.h.in
+index d16a997..8088a2a 100644
+--- a/config.h.in
++++ b/config.h.in
+@@ -27,7 +27,7 @@
+ #define TPM_STORAGE_NAME "${TPM_STORAGE_NAME}"
+ #define TPM_DEVICE_NAME "${TPM_DEVICE_NAME}"
+ #define TPM_LOG_FILE "${TPM_LOG_FILE}"
+-#define TPM_CMD_BUF_SIZE 4096
++#define TPM_CMD_BUF_SIZE 4088
+
+ #endif /* _CONFIG_H_ */
+
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-cmake-Wextra.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-cmake-Wextra.patch
new file mode 100644
index 000000000..5fee4e9a0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-cmake-Wextra.patch
@@ -0,0 +1,21 @@
+---
+ CMakeLists.txt | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: tpm_emulator-x86_64/CMakeLists.txt
+===================================================================
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -40,10 +40,11 @@ set(TPM_STORAGE_NAME "/var/lib/tpm/tpm_e
+ set(TPM_DEVICE_NAME "/dev/tpm")
+ endif()
+ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h)
+-add_definitions(-Wall -Werror -Wno-unused-parameter -Wpointer-arith -Wcast-align -Wwrite-strings)
++add_definitions(-Wall -Werror)
+ if("${CMAKE_SYSTEM}" MATCHES "Linux")
+ add_definitions(-Wextra)
+ endif()
++add_definitions(-Wno-unused-parameter -Wpointer-arith -Wcast-align -Wwrite-strings)
+ if(USE_OPENSSL)
+ add_definitions(-DUSE_OPENSSL)
+ endif()
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote-anyloc.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote-anyloc.patch
new file mode 100644
index 000000000..7b37d5120
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote-anyloc.patch
@@ -0,0 +1,127 @@
+diff --git a/tpm/tpm_cmd_handler.c b/tpm/tpm_cmd_handler.c
+index 69511d1..7545d51 100644
+--- a/tpm/tpm_cmd_handler.c
++++ b/tpm/tpm_cmd_handler.c
+@@ -3347,12 +3347,13 @@ static TPM_RESULT execute_TPM_DeepQuote(TPM_REQUEST *req, TPM_RESPONSE *rsp)
+ {
+ TPM_NONCE nonce;
+ TPM_RESULT res;
+- UINT32 sigSize;
+- BYTE *sig;
++ UINT32 quote_blob_size;
++ BYTE *quote_blob;
+ BYTE *ptr;
+ UINT32 len;
+ TPM_PCR_SELECTION myPCR;
+ TPM_PCR_SELECTION ptPCR;
++ UINT32 extraInfoFlags = 0;
+
+ tpm_compute_in_param_digest(req);
+
+@@ -3361,17 +3362,19 @@ static TPM_RESULT execute_TPM_DeepQuote(TPM_REQUEST *req, TPM_RESPONSE *rsp)
+ if (tpm_unmarshal_TPM_NONCE(&ptr, &len, &nonce)
+ || tpm_unmarshal_TPM_PCR_SELECTION(&ptr, &len, &myPCR)
+ || tpm_unmarshal_TPM_PCR_SELECTION(&ptr, &len, &ptPCR)
++ || tpm_unmarshal_TPM_DEEP_QUOTE_INFO(&ptr, &len, &extraInfoFlags)
+ || len != 0) return TPM_BAD_PARAMETER;
+
+- res = TPM_DeepQuote(&nonce, &myPCR, &ptPCR, &req->auth1, &sigSize, &sig);
++ res = TPM_DeepQuote(&nonce, &myPCR, &ptPCR, &req->auth1, extraInfoFlags,
++ &quote_blob_size, &quote_blob);
+ if (res != TPM_SUCCESS) return res;
+- rsp->paramSize = len = sigSize;
++ rsp->paramSize = len = quote_blob_size;
+ rsp->param = ptr = tpm_malloc(len);
+- if (ptr == NULL || tpm_marshal_BLOB(&ptr, &len, sig, sigSize)) {
++ if (ptr == NULL || tpm_marshal_BLOB(&ptr, &len, quote_blob, quote_blob_size)) {
+ tpm_free(rsp->param);
+ res = TPM_FAIL;
+ }
+- tpm_free(sig);
++ tpm_free(quote_blob);
+
+ return res;
+ }
+diff --git a/tpm/tpm_commands.h b/tpm/tpm_commands.h
+index 328d1be..a56dd5f 100644
+--- a/tpm/tpm_commands.h
++++ b/tpm/tpm_commands.h
+@@ -3077,6 +3077,7 @@ TPM_RESULT TPM_ParentSignEK(
+ * @myPCR: [in] PCR selection for the virtual TPM
+ * @ptPCR: [in] PCR selection for the hardware TPM
+ * @auth1: [in, out] Authorization protocol parameters
++ * @extraInfoFlags [in] Flags for including, kernel hash, group info, etc
+ * @sigSize: [out] The length of the returned digital signature
+ * @sig: [out] The resulting digital signature and PCR values
+ * Returns: TPM_SUCCESS on success, a TPM error code otherwise.
+@@ -3086,6 +3087,7 @@ TPM_RESULT TPM_DeepQuote(
+ TPM_PCR_SELECTION *myPCR,
+ TPM_PCR_SELECTION *ptPCR,
+ TPM_AUTH *auth1,
++ UINT32 extraInfoFlags,
+ UINT32 *sigSize,
+ BYTE **sig
+ );
+diff --git a/tpm/tpm_credentials.c b/tpm/tpm_credentials.c
+index c0d62e7..6586c22 100644
+--- a/tpm/tpm_credentials.c
++++ b/tpm/tpm_credentials.c
+@@ -183,7 +183,8 @@ TPM_RESULT TPM_OwnerReadInternalPub(TPM_KEY_HANDLE keyHandle, TPM_AUTH *auth1,
+
+ int endorsementKeyFresh = 0;
+
+-TPM_RESULT VTPM_GetParentQuote(TPM_DIGEST* data, TPM_PCR_SELECTION *sel, UINT32 *sigSize, BYTE **sig);
++TPM_RESULT VTPM_GetParentQuote(TPM_NONCE *data, TPM_PCR_SELECTION *sel,
++ UINT32 extraInfoFlags, UINT32 *sigSize, BYTE **sig);
+
+ TPM_RESULT TPM_ParentSignEK(TPM_NONCE *externalData, TPM_PCR_SELECTION *sel,
+ TPM_AUTH *auth1, UINT32 *sigSize, BYTE **sig)
+@@ -191,7 +192,7 @@ TPM_RESULT TPM_ParentSignEK(TPM_NONCE *externalData, TPM_PCR_SELECTION *sel,
+ TPM_PUBKEY pubKey;
+ TPM_RESULT res;
+ TPM_DIGEST hres;
+-
++ UINT32 extraInfoFlags = 0;
+ info("TPM_ParentSignEK()");
+
+ res = tpm_verify_auth(auth1, tpmData.permanent.data.ownerAuth, TPM_KH_OWNER);
+@@ -206,7 +207,7 @@ TPM_RESULT TPM_ParentSignEK(TPM_NONCE *externalData, TPM_PCR_SELECTION *sel,
+ res = TPM_FAIL;
+
+ if (res == TPM_SUCCESS)
+- res = VTPM_GetParentQuote(&hres, sel, sigSize, sig);
++ res = VTPM_GetParentQuote((TPM_NONCE*)&hres, sel, extraInfoFlags, sigSize, sig);
+
+ free_TPM_PUBKEY(pubKey);
+ return res;
+@@ -218,7 +219,7 @@ static const BYTE dquot_hdr[] = {
+
+ TPM_RESULT TPM_DeepQuote(TPM_NONCE *externalData, TPM_PCR_SELECTION *myPCR,
+ TPM_PCR_SELECTION *ptPCR, TPM_AUTH *auth1,
+- UINT32 *sigSize, BYTE **sig)
++ UINT32 extraInfoFlags, UINT32 *quote_blob_size, BYTE **quote_blob)
+ {
+ TPM_RESULT res;
+ TPM_DIGEST hres;
+@@ -253,7 +254,7 @@ TPM_RESULT TPM_DeepQuote(TPM_NONCE *externalData, TPM_PCR_SELECTION *myPCR,
+
+ tpm_free(buf);
+
+- res = VTPM_GetParentQuote(&hres, ptPCR, sigSize, sig);
++ res = VTPM_GetParentQuote((TPM_NONCE*)&hres, ptPCR, extraInfoFlags, quote_blob_size, quote_blob);
+
+ return res;
+ }
+diff --git a/tpm/tpm_marshalling.h b/tpm/tpm_marshalling.h
+index d510ebe..2e0c008 100644
+--- a/tpm/tpm_marshalling.h
++++ b/tpm/tpm_marshalling.h
+@@ -268,6 +268,8 @@ static inline int tpm_unmarshal_BOOL(BYTE **ptr, UINT32 *length, BOOL *v)
+ #define tpm_unmarshal_TPM_REDIR_COMMAND tpm_unmarshal_UINT32
+ #define tpm_marshal_DAAHANDLE tpm_marshal_UINT32
+ #define tpm_unmarshal_DAAHANDLE tpm_unmarshal_UINT32
++#define tpm_marshal_TPM_DEEP_QUOTE_INFO tpm_marshal_UINT32
++#define tpm_unmarshal_TPM_DEEP_QUOTE_INFO tpm_unmarshal_UINT32
+
+ int tpm_marshal_UINT32_ARRAY(BYTE **ptr, UINT32 *length, UINT32 *v, UINT32 n);
+ int tpm_unmarshal_UINT32_ARRAY(BYTE **ptr, UINT32 *length, UINT32 *v, UINT32 n);
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote.patch
new file mode 100644
index 000000000..6344f3872
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-deepquote.patch
@@ -0,0 +1,187 @@
+diff --git a/tpm/tpm_cmd_handler.c b/tpm/tpm_cmd_handler.c
+index 0fabf98..69511d1 100644
+--- a/tpm/tpm_cmd_handler.c
++++ b/tpm/tpm_cmd_handler.c
+@@ -3343,6 +3343,39 @@ static TPM_RESULT execute_TPM_ParentSignEK(TPM_REQUEST *req, TPM_RESPONSE *rsp)
+ return res;
+ }
+
++static TPM_RESULT execute_TPM_DeepQuote(TPM_REQUEST *req, TPM_RESPONSE *rsp)
++{
++ TPM_NONCE nonce;
++ TPM_RESULT res;
++ UINT32 sigSize;
++ BYTE *sig;
++ BYTE *ptr;
++ UINT32 len;
++ TPM_PCR_SELECTION myPCR;
++ TPM_PCR_SELECTION ptPCR;
++
++ tpm_compute_in_param_digest(req);
++
++ ptr = req->param;
++ len = req->paramSize;
++ if (tpm_unmarshal_TPM_NONCE(&ptr, &len, &nonce)
++ || tpm_unmarshal_TPM_PCR_SELECTION(&ptr, &len, &myPCR)
++ || tpm_unmarshal_TPM_PCR_SELECTION(&ptr, &len, &ptPCR)
++ || len != 0) return TPM_BAD_PARAMETER;
++
++ res = TPM_DeepQuote(&nonce, &myPCR, &ptPCR, &req->auth1, &sigSize, &sig);
++ if (res != TPM_SUCCESS) return res;
++ rsp->paramSize = len = sigSize;
++ rsp->param = ptr = tpm_malloc(len);
++ if (ptr == NULL || tpm_marshal_BLOB(&ptr, &len, sig, sigSize)) {
++ tpm_free(rsp->param);
++ res = TPM_FAIL;
++ }
++ tpm_free(sig);
++
++ return res;
++}
++
+ static void tpm_setup_rsp_auth(TPM_COMMAND_CODE ordinal, TPM_RESPONSE *rsp)
+ {
+ tpm_hmac_ctx_t hmac;
+@@ -4098,6 +4131,11 @@ void tpm_execute_command(TPM_REQUEST *req, TPM_RESPONSE *rsp)
+ res = execute_TPM_ParentSignEK(req, rsp);
+ break;
+
++ case TPM_ORD_DeepQuote:
++ debug("[TPM_ORD_DeepQuote]");
++ res = execute_TPM_DeepQuote(req, rsp);
++ break;
++
+ default:
+ #ifdef MTM_EMULATOR
+ res = mtm_execute_command(req, rsp);
+diff --git a/tpm/tpm_commands.h b/tpm/tpm_commands.h
+index 7fef934..328d1be 100644
+--- a/tpm/tpm_commands.h
++++ b/tpm/tpm_commands.h
+@@ -3071,6 +3071,25 @@ TPM_RESULT TPM_ParentSignEK(
+ BYTE **sig
+ );
+
++/**
++ * TPM_DeepQuote - gets a hardware TPM quote of a vTPM's PCRs
++ * @externalData: [in] AntiReplay nonce to prevent replay of messages
++ * @myPCR: [in] PCR selection for the virtual TPM
++ * @ptPCR: [in] PCR selection for the hardware TPM
++ * @auth1: [in, out] Authorization protocol parameters
++ * @sigSize: [out] The length of the returned digital signature
++ * @sig: [out] The resulting digital signature and PCR values
++ * Returns: TPM_SUCCESS on success, a TPM error code otherwise.
++ */
++TPM_RESULT TPM_DeepQuote(
++ TPM_NONCE *externalData,
++ TPM_PCR_SELECTION *myPCR,
++ TPM_PCR_SELECTION *ptPCR,
++ TPM_AUTH *auth1,
++ UINT32 *sigSize,
++ BYTE **sig
++);
++
+ /*
+ * Error handling
+ * [tpm_error.c]
+diff --git a/tpm/tpm_credentials.c b/tpm/tpm_credentials.c
+index 01f29e6..c0d62e7 100644
+--- a/tpm/tpm_credentials.c
++++ b/tpm/tpm_credentials.c
+@@ -211,3 +211,49 @@ TPM_RESULT TPM_ParentSignEK(TPM_NONCE *externalData, TPM_PCR_SELECTION *sel,
+ free_TPM_PUBKEY(pubKey);
+ return res;
+ }
++
++static const BYTE dquot_hdr[] = {
++ 0, 0, 0, 0, 'D', 'Q', 'U', 'T'
++};
++
++TPM_RESULT TPM_DeepQuote(TPM_NONCE *externalData, TPM_PCR_SELECTION *myPCR,
++ TPM_PCR_SELECTION *ptPCR, TPM_AUTH *auth1,
++ UINT32 *sigSize, BYTE **sig)
++{
++ TPM_RESULT res;
++ TPM_DIGEST hres;
++ TPM_PCR_INFO_SHORT pcrData;
++ tpm_sha1_ctx_t ctx;
++ BYTE *buf, *ptr;
++ UINT32 size, len;
++
++ info("TPM_DeepQuote()");
++
++ res = tpm_verify_auth(auth1, tpmData.permanent.data.ownerAuth, TPM_KH_OWNER);
++ if (res != TPM_SUCCESS) return res;
++
++ res = tpm_compute_pcr_digest(myPCR, &pcrData.digestAtRelease, NULL);
++ if (res != TPM_SUCCESS) return res;
++
++ pcrData.pcrSelection.sizeOfSelect = myPCR->sizeOfSelect;
++ memcpy(pcrData.pcrSelection.pcrSelect, myPCR->pcrSelect, myPCR->sizeOfSelect);
++ pcrData.localityAtRelease = 1 << tpmData.stany.flags.localityModifier;
++
++ size = len = sizeof_TPM_PCR_INFO_SHORT(pcrData);
++ buf = ptr = tpm_malloc(size);
++ if (buf == NULL) return TPM_NOSPACE;
++ if (tpm_marshal_TPM_PCR_INFO_SHORT(&ptr, &len, &pcrData))
++ return TPM_FAIL;
++
++ tpm_sha1_init(&ctx);
++ tpm_sha1_update(&ctx, dquot_hdr, 8);
++ tpm_sha1_update(&ctx, externalData->nonce, 20);
++ tpm_sha1_update(&ctx, buf, size);
++ tpm_sha1_final(&ctx, hres.digest);
++
++ tpm_free(buf);
++
++ res = VTPM_GetParentQuote(&hres, ptPCR, sigSize, sig);
++
++ return res;
++}
+diff --git a/tpm/tpm_structures.h b/tpm/tpm_structures.h
+index b0f4625..dfb1894 100644
+--- a/tpm/tpm_structures.h
++++ b/tpm/tpm_structures.h
+@@ -660,6 +660,42 @@ typedef struct tdTPM_CMK_MA_APPROVAL {
+
+ /* VTPM-only commands: */
+ /*
++ * Deep Quote - Create quote of PCRs
++ * Input:
++ * TPM_TAG tag TPM_TAG_RQU_AUTH1_COMMAND
++ * UINT32 paramSize Total size of request
++ * TPM_COMMAND_CODE ordinal TPM_ORD_DeepQuote
++ * TPM_NONCE externData 20 bytes of external data
++ * TPM_PCR_SELECTION vtSel PCR selection for virtual TPM
++ * TPM_PCR_SELECTION ptSel PCR selection for physical TPM
++ * ---
++ * UINT32 authHandle Owner authorization session (OIAP)
++ * TPM_NONCE nonceOdd Nonce for authHandle
++ * BOOL continueAuth Continue flag for authHandle
++ * TPM_AUTHDATA privAuth Authorization digest for command
++ *
++ * Output:
++ * TPM_TAG tag TPM_TAG_RSP_AUTH1_COMMAND
++ * UINT32 paramSize Total size of response
++ * TPM_RESULT returnCode Return code of the operation
++ * BYTE[] sig Signature provided by physical TPM
++ * TPM_PCRVALUE[] pcrValue Values of hardware PCRs used in the quote
++ * ---
++ * TPM_NONCE nonceEven Nonce for authHandle
++ * BOOL continueAuth Continue flag for authHandle
++ * TPM_AUTHDATA resAuth Authorization digest for response
++ *
++ * The values of the virtual TPM's PCRs are not included in the response.
++ * The signature is a standard TPM_Quote response from the physical TPM; its
++ * externalData is the SHA1 hash of the following structure:
++ * TPM_STRUCT_VER version MUST be 0.0.0.0
++ * BYTE[4] fixed MUST be the string "DQUT"
++ * TPM_NONCE externData From input to the deep quote
++ * TPM_PCR_INFO_SHORT pcrData Virtual TPM's PCRs
++ */
++#define TPM_ORD_DeepQuote (TPM_VENDOR_COMMAND | TPM_ORD_Quote)
++
++/*
+ * ParentSignEK - Proof of fresh provisioning and EK value
+ *
+ * Input:
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-implicit-fallthrough.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-implicit-fallthrough.patch
new file mode 100644
index 000000000..e95d41fc7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-implicit-fallthrough.patch
@@ -0,0 +1,10 @@
+--- a/tpm/tpm_cmd_handler.c.orig 2017-04-27 13:37:14.408000000 +0200
++++ b/tpm/tpm_cmd_handler.c 2017-04-27 13:39:53.585000000 +0200
+@@ -3397,6 +3397,7 @@
+ sizeof(rsp->auth2->nonceOdd.nonce));
+ tpm_hmac_update(&hmac, (BYTE*)&rsp->auth2->continueAuthSession, 1);
+ tpm_hmac_final(&hmac, rsp->auth2->auth);
++ /* fall-thru */
+ case TPM_TAG_RSP_AUTH1_COMMAND:
+ tpm_hmac_init(&hmac, rsp->auth1->secret, sizeof(rsp->auth1->secret));
+ tpm_hmac_update(&hmac, rsp->auth1->digest, sizeof(rsp->auth1->digest));
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-locality.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-locality.patch
new file mode 100644
index 000000000..8ab7dea67
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-locality.patch
@@ -0,0 +1,50 @@
+diff --git a/tpm/tpm_capability.c b/tpm/tpm_capability.c
+index 60bbb90..f8f7f0f 100644
+--- a/tpm/tpm_capability.c
++++ b/tpm/tpm_capability.c
+@@ -949,6 +949,8 @@ static TPM_RESULT set_vendor(UINT32 subCap, BYTE *setValue,
+ UINT32 setValueSize, BOOL ownerAuth,
+ BOOL deactivated, BOOL disabled)
+ {
++ if (tpmData.stany.flags.localityModifier != 8)
++ return TPM_BAD_PARAMETER;
+ /* set the capability area with the specified data, on failure
+ deactivate the TPM */
+ switch (subCap) {
+diff --git a/tpm/tpm_cmd_handler.c b/tpm/tpm_cmd_handler.c
+index 288d1ce..9e1cfb4 100644
+--- a/tpm/tpm_cmd_handler.c
++++ b/tpm/tpm_cmd_handler.c
+@@ -4132,7 +4132,7 @@ void tpm_emulator_shutdown()
+ tpm_extern_release();
+ }
+
+-int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size)
++int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size, int locality)
+ {
+ TPM_REQUEST req;
+ TPM_RESPONSE rsp;
+@@ -4140,7 +4140,9 @@ int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint3
+ UINT32 len;
+ BOOL free_out;
+
+- debug("tpm_handle_command()");
++ debug("tpm_handle_command(%d)", locality);
++ if (locality != -1)
++ tpmData.stany.flags.localityModifier = locality;
+
+ /* we need the whole packet at once, otherwise unmarshalling will fail */
+ if (tpm_unmarshal_TPM_REQUEST((uint8_t**)&in, &in_size, &req) != 0) {
+diff --git a/tpm/tpm_emulator.h b/tpm/tpm_emulator.h
+index eed749e..4c228bd 100644
+--- a/tpm/tpm_emulator.h
++++ b/tpm/tpm_emulator.h
+@@ -59,7 +59,7 @@ void tpm_emulator_shutdown(void);
+ * its usage. In case of an error, all internally allocated memory
+ * is released and the the state of out and out_size is unspecified.
+ */
+-int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size);
++int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size, int locality);
+
+ #endif /* _TPM_EMULATOR_H_ */
+
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-parent-sign-ek.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-parent-sign-ek.patch
new file mode 100644
index 000000000..14e66eee4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/vtpm-parent-sign-ek.patch
@@ -0,0 +1,196 @@
+diff --git a/tpm/tpm_cmd_handler.c b/tpm/tpm_cmd_handler.c
+index 9e1cfb4..0fabf98 100644
+--- a/tpm/tpm_cmd_handler.c
++++ b/tpm/tpm_cmd_handler.c
+@@ -3312,6 +3312,37 @@ static TPM_RESULT execute_TPM_OwnerReadPubek(TPM_REQUEST *req, TPM_RESPONSE *rsp
+ return res;
+ }
+
++static TPM_RESULT execute_TPM_ParentSignEK(TPM_REQUEST *req, TPM_RESPONSE *rsp)
++{
++ TPM_NONCE nonce;
++ TPM_RESULT res;
++ UINT32 sigSize;
++ BYTE *sig;
++ BYTE *ptr;
++ UINT32 len;
++ TPM_PCR_SELECTION targetPCR;
++
++ tpm_compute_in_param_digest(req);
++
++ ptr = req->param;
++ len = req->paramSize;
++ if (tpm_unmarshal_TPM_NONCE(&ptr, &len, &nonce)
++ || tpm_unmarshal_TPM_PCR_SELECTION(&ptr, &len, &targetPCR)
++ || len != 0) return TPM_BAD_PARAMETER;
++
++ res = TPM_ParentSignEK(&nonce, &targetPCR, &req->auth1, &sigSize, &sig);
++ if (res != TPM_SUCCESS) return res;
++ rsp->paramSize = len = sigSize;
++ rsp->param = ptr = tpm_malloc(len);
++ if (ptr == NULL || tpm_marshal_BLOB(&ptr, &len, sig, sigSize)) {
++ tpm_free(rsp->param);
++ res = TPM_FAIL;
++ }
++ tpm_free(sig);
++
++ return res;
++}
++
+ static void tpm_setup_rsp_auth(TPM_COMMAND_CODE ordinal, TPM_RESPONSE *rsp)
+ {
+ tpm_hmac_ctx_t hmac;
+@@ -4062,6 +4093,11 @@ void tpm_execute_command(TPM_REQUEST *req, TPM_RESPONSE *rsp)
+ res = execute_TPM_OwnerReadPubek(req, rsp);
+ break;
+
++ case TPM_ORD_ParentSignEK:
++ debug("[TPM_ORD_ParentSignEK]");
++ res = execute_TPM_ParentSignEK(req, rsp);
++ break;
++
+ default:
+ #ifdef MTM_EMULATOR
+ res = mtm_execute_command(req, rsp);
+diff --git a/tpm/tpm_commands.h b/tpm/tpm_commands.h
+index a7666f6..7fef934 100644
+--- a/tpm/tpm_commands.h
++++ b/tpm/tpm_commands.h
+@@ -3054,6 +3054,23 @@ TPM_RESULT TPM_OwnerReadPubek(
+ TPM_PUBKEY *pubEndorsementKey
+ );
+
++/**
++ * TPM_ParentSignEK - gets a hardware TPM quote of a vTPM's EK
++ * @externalData: [in] AntiReplay nonce to prevent replay of messages
++ * @sel: [in] PCR selection for the hardware TPM's quote
++ * @auth1: [in, out] Authorization protocol parameters
++ * @sigSize: [out] The length of the returned digital signature
++ * @sig: [out] The resulting digital signature and PCR values
++ * Returns: TPM_SUCCESS on success, a TPM error code otherwise.
++ */
++TPM_RESULT TPM_ParentSignEK(
++ TPM_NONCE *externalData,
++ TPM_PCR_SELECTION *sel,
++ TPM_AUTH *auth1,
++ UINT32 *sigSize,
++ BYTE **sig
++);
++
+ /*
+ * Error handling
+ * [tpm_error.c]
+diff --git a/tpm/tpm_credentials.c b/tpm/tpm_credentials.c
+index 9cd64af..01f29e6 100644
+--- a/tpm/tpm_credentials.c
++++ b/tpm/tpm_credentials.c
+@@ -180,3 +180,34 @@ TPM_RESULT TPM_OwnerReadInternalPub(TPM_KEY_HANDLE keyHandle, TPM_AUTH *auth1,
+ return TPM_BAD_PARAMETER;
+ }
+ }
++
++int endorsementKeyFresh = 0;
++
++TPM_RESULT VTPM_GetParentQuote(TPM_DIGEST* data, TPM_PCR_SELECTION *sel, UINT32 *sigSize, BYTE **sig);
++
++TPM_RESULT TPM_ParentSignEK(TPM_NONCE *externalData, TPM_PCR_SELECTION *sel,
++ TPM_AUTH *auth1, UINT32 *sigSize, BYTE **sig)
++{
++ TPM_PUBKEY pubKey;
++ TPM_RESULT res;
++ TPM_DIGEST hres;
++
++ info("TPM_ParentSignEK()");
++
++ res = tpm_verify_auth(auth1, tpmData.permanent.data.ownerAuth, TPM_KH_OWNER);
++ if (res != TPM_SUCCESS) return res;
++
++ if (!endorsementKeyFresh) return TPM_DISABLED_CMD;
++
++ res = tpm_get_pubek(&pubKey);
++ if (res != TPM_SUCCESS) return res;
++
++ if (tpm_compute_pubkey_checksum(externalData, &pubKey, &hres))
++ res = TPM_FAIL;
++
++ if (res == TPM_SUCCESS)
++ res = VTPM_GetParentQuote(&hres, sel, sigSize, sig);
++
++ free_TPM_PUBKEY(pubKey);
++ return res;
++}
+diff --git a/tpm/tpm_data.c b/tpm/tpm_data.c
+index 50c9697..6a0c499 100644
+--- a/tpm/tpm_data.c
++++ b/tpm/tpm_data.c
+@@ -76,6 +76,8 @@ static void init_timeouts(void)
+ tpmData.permanent.data.cmd_durations[2] = 1000;
+ }
+
++extern int endorsementKeyFresh;
++
+ void tpm_init_data(void)
+ {
+ /* endorsement key */
+@@ -157,6 +159,7 @@ void tpm_init_data(void)
+ if (tpmConf & TPM_CONF_GENERATE_EK) {
+ /* generate a new endorsement key */
+ tpm_rsa_generate_key(&tpmData.permanent.data.endorsementKey, 2048);
++ endorsementKeyFresh = 1;
+ } else {
+ /* setup endorsement key */
+ tpm_rsa_import_key(&tpmData.permanent.data.endorsementKey,
+diff --git a/tpm/tpm_structures.h b/tpm/tpm_structures.h
+index f746c05..b0f4625 100644
+--- a/tpm/tpm_structures.h
++++ b/tpm/tpm_structures.h
+@@ -658,6 +658,49 @@ typedef struct tdTPM_CMK_MA_APPROVAL {
+ #define TPM_ORD_TickStampBlob 242
+ #define TPM_ORD_MAX 256
+
++/* VTPM-only commands: */
++/*
++ * ParentSignEK - Proof of fresh provisioning and EK value
++ *
++ * Input:
++ * TPM_TAG tag TPM_TAG_RQU_AUTH1_COMMAND
++ * UINT32 paramSize Total size of request
++ * TPM_COMMAND_CODE ordinal TPM_ORD_ParentSignEK
++ * TPM_NONCE externData 20 bytes of external data
++ * TPM_PCR_SELECTION ptSel PCR selection for physical TPM
++ * ---
++ * UINT32 authHandle Owner authorization session (OIAP)
++ * TPM_NONCE nonceOdd Nonce for authHandle
++ * BOOL continueAuth Continue flag for authHandle
++ * TPM_AUTHDATA privAuth Authorization digest for command
++ *
++ * Output:
++ * TPM_TAG tag TPM_TAG_RSP_AUTH1_COMMAND
++ * UINT32 paramSize Total size of response
++ * TPM_RESULT returnCode Return code of the operation
++ * BYTE[] sig Signature provided by physical TPM
++ * TPM_PCRVALUE[] pcrValue Values of hardware PCRs used in the quote
++ * ---
++ * TPM_NONCE nonceEven Nonce for authHandle
++ * BOOL continueAuth Continue flag for authHandle
++ * TPM_AUTHDATA resAuth Authorization digest for response
++ *
++ * This command is only valid on the first boot of a vTPM; on any subsequent
++ * boot, the command returns TPM_DISABLED_CMD. It is intended to be used to
++ * provide evidence of proper platform configuration to the verifier/CA which is
++ * responsible for the creation of the vTPM's endorsement credential, which will
++ * be used on subsequent boots to certify AIKs via the usual Privacy CA protocol.
++ *
++ * The values of the virtual TPM's PCRs are not included in the response.
++ * The signature is a standard TPM_Quote response from the physical TPM; its
++ * externalData is the SHA1 hash of the following structure:
++ * TPM_PUBKEY pubEK The vTPM's public EK
++ * TPM_NONCE externData From input to the deep quote
++ *
++ * This structure was chosen to match the return of TPM_ReadPubek
++ */
++#define TPM_ORD_ParentSignEK (TPM_VENDOR_COMMAND | TPM_ORD_ReadPubek)
++
+ /*
+ * TCS Ordinals ([TPM_Part2], Section 17.1)
+ *
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/xsa246-4.9.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa246-4.9.patch
new file mode 100644
index 000000000..6370a1062
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa246-4.9.patch
@@ -0,0 +1,74 @@
+From: Julien Grall <julien.grall@linaro.org>
+Subject: x86/pod: prevent infinite loop when shattering large pages
+
+When populating pages, the PoD may need to split large ones using
+p2m_set_entry and request the caller to retry (see ept_get_entry for
+instance).
+
+p2m_set_entry may fail to shatter if it is not possible to allocate
+memory for the new page table. However, the error is not propagated
+resulting to the callers to retry infinitely the PoD.
+
+Prevent the infinite loop by return false when it is not possible to
+shatter the large mapping.
+
+This is XSA-246.
+
+Signed-off-by: Julien Grall <julien.grall@linaro.org>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
+ * NOTE: In a fine-grained p2m locking scenario this operation
+ * may need to promote its locking from gfn->1g superpage
+ */
+- p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+- p2m_populate_on_demand, p2m->default_access);
+- return 0;
++ return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
++ p2m_populate_on_demand, p2m->default_access);
+ }
+
+ /* Only reclaim if we're in actual need of more cache. */
+@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai
+
+ gfn_aligned = (gfn >> order) << order;
+
+- p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+- p2m->default_access);
++ if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
++ p2m->default_access) )
++ {
++ p2m_pod_cache_add(p2m, p, order);
++ goto out_fail;
++ }
+
+ for( i = 0; i < (1UL << order); i++ )
+ {
+@@ -1150,13 +1153,18 @@ remap_and_retry:
+ BUG_ON(order != PAGE_ORDER_2M);
+ pod_unlock(p2m);
+
+- /* Remap this 2-meg region in singleton chunks */
+- /* NOTE: In a p2m fine-grained lock scenario this might
+- * need promoting the gfn lock from gfn->2M superpage */
++ /*
++ * Remap this 2-meg region in singleton chunks. See the comment on the
++ * 1G page splitting path above for why a single call suffices.
++ *
++ * NOTE: In a p2m fine-grained lock scenario this might
++ * need promoting the gfn lock from gfn->2M superpage.
++ */
+ gfn_aligned = (gfn>>order)<<order;
+- for(i=0; i<(1<<order); i++)
+- p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
+- p2m_populate_on_demand, p2m->default_access);
++ if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
++ p2m_populate_on_demand, p2m->default_access) )
++ return -1;
++
+ if ( tb_init_done )
+ {
+ struct {
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/xsa248.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa248.patch
new file mode 100644
index 000000000..966c16e04
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa248.patch
@@ -0,0 +1,164 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/mm: don't wrongly set page ownership
+
+PV domains can obtain mappings of any pages owned by the correct domain,
+including ones that aren't actually assigned as "normal" RAM, but used
+by Xen internally. At the moment such "internal" pages marked as owned
+by a guest include pages used to track logdirty bits, as well as p2m
+pages and the "unpaged pagetable" for HVM guests. Since the PV memory
+management and shadow code conflict in their use of struct page_info
+fields, and since shadow code is being used for log-dirty handling for
+PV domains, pages coming from the shadow pool must, for PV domains, not
+have the domain set as their owner.
+
+While the change could be done conditionally for just the PV case in
+shadow code, do it unconditionally (and for consistency also for HAP),
+just to be on the safe side.
+
+There's one special case though for shadow code: The page table used for
+running a HVM guest in unpaged mode is subject to get_page() (in
+set_shadow_status()) and hence must have its owner set.
+
+This is XSA-248.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+---
+v2: Drop PGC_page_table related pieces.
+
+--- a/xen/arch/x86/mm/hap/hap.c
++++ b/xen/arch/x86/mm/hap/hap.c
+@@ -286,8 +286,7 @@ static struct page_info *hap_alloc_p2m_p
+ {
+ d->arch.paging.hap.total_pages--;
+ d->arch.paging.hap.p2m_pages++;
+- page_set_owner(pg, d);
+- pg->count_info |= 1;
++ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
+ }
+ else if ( !d->arch.paging.p2m_alloc_failed )
+ {
+@@ -302,21 +301,23 @@ static struct page_info *hap_alloc_p2m_p
+
+ static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
+ {
++ struct domain *owner = page_get_owner(pg);
++
+ /* This is called both from the p2m code (which never holds the
+ * paging lock) and the log-dirty code (which always does). */
+ paging_lock_recursive(d);
+
+- ASSERT(page_get_owner(pg) == d);
+- /* Should have just the one ref we gave it in alloc_p2m_page() */
+- if ( (pg->count_info & PGC_count_mask) != 1 ) {
+- HAP_ERROR("Odd p2m page %p count c=%#lx t=%"PRtype_info"\n",
+- pg, pg->count_info, pg->u.inuse.type_info);
++ /* Should still have no owner and count zero. */
++ if ( owner || (pg->count_info & PGC_count_mask) )
++ {
++ HAP_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
++ d->domain_id, mfn_x(page_to_mfn(pg)),
++ owner ? owner->domain_id : DOMID_INVALID,
++ pg->count_info, pg->u.inuse.type_info);
+ WARN();
++ pg->count_info &= ~PGC_count_mask;
++ page_set_owner(pg, NULL);
+ }
+- pg->count_info &= ~PGC_count_mask;
+- /* Free should not decrement domain's total allocation, since
+- * these pages were allocated without an owner. */
+- page_set_owner(pg, NULL);
+ d->arch.paging.hap.p2m_pages--;
+ d->arch.paging.hap.total_pages++;
+ hap_free(d, page_to_mfn(pg));
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -1503,32 +1503,29 @@ shadow_alloc_p2m_page(struct domain *d)
+ pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
+ d->arch.paging.shadow.p2m_pages++;
+ d->arch.paging.shadow.total_pages--;
++ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
+
+ paging_unlock(d);
+
+- /* Unlike shadow pages, mark p2m pages as owned by the domain.
+- * Marking the domain as the owner would normally allow the guest to
+- * create mappings of these pages, but these p2m pages will never be
+- * in the domain's guest-physical address space, and so that is not
+- * believed to be a concern. */
+- page_set_owner(pg, d);
+- pg->count_info |= 1;
+ return pg;
+ }
+
+ static void
+ shadow_free_p2m_page(struct domain *d, struct page_info *pg)
+ {
+- ASSERT(page_get_owner(pg) == d);
+- /* Should have just the one ref we gave it in alloc_p2m_page() */
+- if ( (pg->count_info & PGC_count_mask) != 1 )
++ struct domain *owner = page_get_owner(pg);
++
++ /* Should still have no owner and count zero. */
++ if ( owner || (pg->count_info & PGC_count_mask) )
+ {
+- SHADOW_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
++ SHADOW_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
++ d->domain_id, mfn_x(page_to_mfn(pg)),
++ owner ? owner->domain_id : DOMID_INVALID,
+ pg->count_info, pg->u.inuse.type_info);
++ pg->count_info &= ~PGC_count_mask;
++ page_set_owner(pg, NULL);
+ }
+- pg->count_info &= ~PGC_count_mask;
+ pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
+- page_set_owner(pg, NULL);
+
+ /* This is called both from the p2m code (which never holds the
+ * paging lock) and the log-dirty code (which always does). */
+@@ -3132,7 +3129,9 @@ int shadow_enable(struct domain *d, u32
+ e = __map_domain_page(pg);
+ write_32bit_pse_identmap(e);
+ unmap_domain_page(e);
++ pg->count_info = 1;
+ pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
++ page_set_owner(pg, d);
+ }
+
+ paging_lock(d);
+@@ -3170,7 +3169,11 @@ int shadow_enable(struct domain *d, u32
+ if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
+ p2m_teardown(p2m);
+ if ( rv != 0 && pg != NULL )
++ {
++ pg->count_info &= ~PGC_count_mask;
++ page_set_owner(pg, NULL);
+ shadow_free_p2m_page(d, pg);
++ }
+ domain_unpause(d);
+ return rv;
+ }
+@@ -3279,7 +3282,22 @@ out:
+
+ /* Must be called outside the lock */
+ if ( unpaged_pagetable )
++ {
++ if ( page_get_owner(unpaged_pagetable) == d &&
++ (unpaged_pagetable->count_info & PGC_count_mask) == 1 )
++ {
++ unpaged_pagetable->count_info &= ~PGC_count_mask;
++ page_set_owner(unpaged_pagetable, NULL);
++ }
++ /* Complain here in cases where shadow_free_p2m_page() won't. */
++ else if ( !page_get_owner(unpaged_pagetable) &&
++ !(unpaged_pagetable->count_info & PGC_count_mask) )
++ SHADOW_ERROR("d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n",
++ d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)),
++ unpaged_pagetable->count_info,
++ unpaged_pagetable->u.inuse.type_info);
+ shadow_free_p2m_page(d, unpaged_pagetable);
++ }
+ }
+
+ void shadow_final_teardown(struct domain *d)
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/xsa249.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa249.patch
new file mode 100644
index 000000000..ecfa4305e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa249.patch
@@ -0,0 +1,42 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/shadow: fix refcount overflow check
+
+Commit c385d27079 ("x86 shadow: for multi-page shadows, explicitly track
+the first page") reduced the refcount width to 25, without adjusting the
+overflow check. Eliminate the disconnect by using a manifest constant.
+
+Interestingly, up to commit 047782fa01 ("Out-of-sync L1 shadows: OOS
+snapshot") the refcount was 27 bits wide, yet the check was already
+using 26.
+
+This is XSA-249.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+---
+v2: Simplify expression back to the style it was.
+
+--- a/xen/arch/x86/mm/shadow/private.h
++++ b/xen/arch/x86/mm/shadow/private.h
+@@ -529,7 +529,7 @@ static inline int sh_get_ref(struct doma
+ x = sp->u.sh.count;
+ nx = x + 1;
+
+- if ( unlikely(nx >= 1U<<26) )
++ if ( unlikely(nx >= (1U << PAGE_SH_REFCOUNT_WIDTH)) )
+ {
+ SHADOW_PRINTK("shadow ref overflow, gmfn=%lx smfn=%lx\n",
+ __backpointer(sp), mfn_x(smfn));
+--- a/xen/include/asm-x86/mm.h
++++ b/xen/include/asm-x86/mm.h
+@@ -82,7 +82,8 @@ struct page_info
+ unsigned long type:5; /* What kind of shadow is this? */
+ unsigned long pinned:1; /* Is the shadow pinned? */
+ unsigned long head:1; /* Is this the first page of the shadow? */
+- unsigned long count:25; /* Reference count */
++#define PAGE_SH_REFCOUNT_WIDTH 25
++ unsigned long count:PAGE_SH_REFCOUNT_WIDTH; /* Reference count */
+ } sh;
+
+ /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/xsa250.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa250.patch
new file mode 100644
index 000000000..26aeb33fe
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa250.patch
@@ -0,0 +1,67 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/shadow: fix ref-counting error handling
+
+The old-Linux handling in shadow_set_l4e() mistakenly ORed together the
+results of sh_get_ref() and sh_pin(). As the latter failing is not a
+correctness problem, simply ignore its return value.
+
+In sh_set_toplevel_shadow() a failing sh_get_ref() must not be
+accompanied by installing the entry, despite the domain being crashed.
+
+This is XSA-250.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/mm/shadow/multi.c
++++ b/xen/arch/x86/mm/shadow/multi.c
+@@ -923,7 +923,7 @@ static int shadow_set_l4e(struct domain
+ shadow_l4e_t new_sl4e,
+ mfn_t sl4mfn)
+ {
+- int flags = 0, ok;
++ int flags = 0;
+ shadow_l4e_t old_sl4e;
+ paddr_t paddr;
+ ASSERT(sl4e != NULL);
+@@ -938,15 +938,16 @@ static int shadow_set_l4e(struct domain
+ {
+ /* About to install a new reference */
+ mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
+- ok = sh_get_ref(d, sl3mfn, paddr);
+- /* Are we pinning l3 shadows to handle wierd linux behaviour? */
+- if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
+- ok |= sh_pin(d, sl3mfn);
+- if ( !ok )
++
++ if ( !sh_get_ref(d, sl3mfn, paddr) )
+ {
+ domain_crash(d);
+ return SHADOW_SET_ERROR;
+ }
++
++ /* Are we pinning l3 shadows to handle weird Linux behaviour? */
++ if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
++ sh_pin(d, sl3mfn);
+ }
+
+ /* Write the new entry */
+@@ -3965,14 +3966,15 @@ sh_set_toplevel_shadow(struct vcpu *v,
+
+ /* Take a ref to this page: it will be released in sh_detach_old_tables()
+ * or the next call to set_toplevel_shadow() */
+- if ( !sh_get_ref(d, smfn, 0) )
++ if ( sh_get_ref(d, smfn, 0) )
++ new_entry = pagetable_from_mfn(smfn);
++ else
+ {
+ SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
+ domain_crash(d);
++ new_entry = pagetable_null();
+ }
+
+- new_entry = pagetable_from_mfn(smfn);
+-
+ install_new_entry:
+ /* Done. Install it */
+ SHADOW_PRINTK("%u/%u [%u] gmfn %#"PRI_mfn" smfn %#"PRI_mfn"\n",
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/xsa251.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa251.patch
new file mode 100644
index 000000000..582ef622e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa251.patch
@@ -0,0 +1,21 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/paging: don't unconditionally BUG() on finding SHARED_M2P_ENTRY
+
+PV guests can fully control the values written into the P2M.
+
+This is XSA-251.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/mm/paging.c
++++ b/xen/arch/x86/mm/paging.c
+@@ -274,7 +274,7 @@ void paging_mark_pfn_dirty(struct domain
+ return;
+
+ /* Shared MFNs should NEVER be marked dirty */
+- BUG_ON(SHARED_M2P(pfn_x(pfn)));
++ BUG_ON(paging_mode_translate(d) && SHARED_M2P(pfn_x(pfn)));
+
+ /*
+ * Values with the MSB set denote MFNs that aren't really part of the
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/xsa253.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa253.patch
new file mode 100644
index 000000000..19e426935
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/xsa253.patch
@@ -0,0 +1,26 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/msr: Free msr_vcpu_policy during vcpu destruction
+
+c/s 4187f79dc7 "x86/msr: introduce struct msr_vcpu_policy" introduced a
+per-vcpu memory allocation, but failed to free it in the clean vcpu
+destruction case.
+
+This is XSA-253
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
+index b17468c..0ae715d 100644
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -382,6 +382,9 @@ void vcpu_destroy(struct vcpu *v)
+
+ vcpu_destroy_fpu(v);
+
++ xfree(v->arch.msr);
++ v->arch.msr = NULL;
++
+ if ( !is_idle_domain(v->domain) )
+ vpmu_destroy(v);
+
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/lwip.inc b/import-layers/meta-virtualization/recipes-extended/xen/lwip.inc
new file mode 100644
index 000000000..e08363326
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/lwip.inc
@@ -0,0 +1,24 @@
+# Copyright (C) 2018 kebodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+require stubdom.inc
+
+# clear this out to break dependency circle
+DEPENDS = ""
+
+# Nothing to configure or compile
+# For stubdoms, lwip is basically a source package with a couple of patches applied.
+do_configure[noexec] = "1"
+do_compile[noexec] = "1"
+
+# needed because this directory isn't typically part of a sysroot
+SYSROOT_DIRS += "${prefix}/lwip"
+
+FILES_${PN} = "\
+ ${prefix} \
+"
+
+do_install() {
+ install -d ${D}${prefix}/lwip
+ cp -r -t ${D}${prefix}/lwip ${S}/src/*
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/lwip_1.3.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/lwip_1.3.0.bb
new file mode 100644
index 000000000..b7d8e95dc
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/lwip_1.3.0.bb
@@ -0,0 +1,19 @@
+# Copyright (C) 2018 kebodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "LWIP"
+HOMEPAGE = "https://savannah.nongnu.org/projects/lwip"
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://COPYING;md5=59a383b05013356e0c9899b06dc5da3f"
+
+SRCREV_lwip = "bcb4afa886408bf0a1dde9c2a4a00323c8b07eb1"
+SRC_URI = "\
+ git://git.savannah.gnu.org/lwip.git;protocol=git;nobranch=1;destsuffix=lwip;name=lwip \
+ file://lwip.patch-cvs \
+ file://lwip.dhcp_create_request-hwaddr_len.patch \
+"
+
+S="${WORKDIR}/${PN}"
+B="${S}"
+
+require lwip.inc
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/mini-os.inc b/import-layers/meta-virtualization/recipes-extended/xen/mini-os.inc
new file mode 100644
index 000000000..61c270417
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/mini-os.inc
@@ -0,0 +1,28 @@
+# Copyright (C) 2018 kebodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+require stubdom.inc
+
+# clear this out to break dependency circle
+DEPENDS = ""
+
+do_configure() {
+ ${MAKE} -C ${WORKDIR}/mini-os links
+}
+
+# Nothing to configure or compile
+do_compile[noexec] = "1"
+
+# needed because this directory isn't typically part of a sysroot
+SYSROOT_DIRS += "${prefix}/mini-os"
+RDEPENDS_${PN}-dev = "perl"
+
+FILES_${PN}-dev = "\
+ ${prefix} \
+"
+
+do_install() {
+ install -d ${D}${prefix}/mini-os
+ cp -r -t ${D}${prefix}/mini-os ${S}/*
+ rm -rf ${D}${prefix}/mini-os/scripts
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/mini-os_4.9.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/mini-os_4.9.0.bb
new file mode 100644
index 000000000..298bc8df6
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/mini-os_4.9.0.bb
@@ -0,0 +1,18 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "Mini-OS is a tiny OS kernel distributed with the Xen Project"
+HOMEPAGE = "https://wiki.xenproject.org/wiki/Mini-OS"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=8a437231894440a8f7629caa372243d0"
+
+# git commit hash for tags: xen-RELEASE-4.9.0, xen-RELEASE-4.9.1, xen-RELEASE-4.9.2
+SRCREV_minios = "ca013fa9baf92f47469ba1f2e1aaa31c41d8a0bb"
+SRC_URI = "\
+ git://xenbits.xen.org/mini-os.git;protocol=git;nobranch=1;destsuffix=mini-os;name=minios \
+ file://mini-os_udivmoddi4-gcc7.patch \
+"
+S="${WORKDIR}/mini-os"
+B="${S}"
+
+require mini-os.inc
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/newlib.inc b/import-layers/meta-virtualization/recipes-extended/xen/newlib.inc
new file mode 100644
index 000000000..1fdbd0aa4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/newlib.inc
@@ -0,0 +1,64 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+require stubdom.inc
+
+CPPFLAGS_INCLUDE_DIR = "-isystem `${HOST_PREFIX}gcc -print-file-name=include`"
+STUBDOM_CFLAGS += "-D_I386MACH_ALLOW_HW_INTERRUPTS"
+
+export ac_cv_path_CC_FOR_TARGET="${HOST_PREFIX}gcc --sysroot=${RECIPE_SYSROOT} ${STUBDOM_CPPFLAGS} ${STUBDOM_CFLAGS}"
+export CC_FOR_TARGET="${HOST_PREFIX}gcc --sysroot=${RECIPE_SYSROOT} ${STUBDOM_CPPFLAGS} ${STUBDOM_CFLAGS}"
+
+export ac_cv_path_CXX_FOR_TARGET="${HOST_PREFIX}g++ --sysroot=${RECIPE_SYSROOT}"
+export CXX_FOR_TARGET="${HOST_PREFIX}g++ --sysroot=${RECIPE_SYSROOT}"
+
+export ac_cv_path_CPP_FOR_TARGET="${HOST_PREFIX}gcc -E --sysroot=${RECIPE_SYSROOT}"
+export CPP_FOR_TARGET="${HOST_PREFIX}gcc -E --sysroot=${RECIPE_SYSROOT}"
+
+export ac_cv_path_LD_FOR_TARGET="${HOST_PREFIX}ld --sysroot=${RECIPE_SYSROOT}"
+export LD_FOR_TARGET="${HOST_PREFIX}ld --sysroot=${RECIPE_SYSROOT}"
+
+export ac_cv_path_AS_FOR_TARGET="${HOST_PREFIX}as"
+export AS_FOR_TARGET="${HOST_PREFIX}as"
+
+export ac_cv_path_AR_FOR_TARGET="${HOST_PREFIX}ar"
+export AR_FOR_TARGET="${HOST_PREFIX}ar"
+
+export ac_cv_path_NM_FOR_TARGET="${HOST_PREFIX}nm"
+export NM_FOR_TARGET="${HOST_PREFIX}nm"
+
+export ac_cv_path_RANLIB_FOR_TARGET="${HOST_PREFIX}ranlib"
+export RANLIB_FOR_TARGET="${HOST_PREFIX}ranlib"
+
+export ac_cv_path_OBJDUMP_FOR_TARGET="${HOST_PREFIX}objdump"
+export OBJDUMP_FOR_TARGET="${HOST_PREFIX}objdump"
+
+export ac_cv_path_OBJCOPY_FOR_TARGET="${HOST_PREFIX}objcopy"
+export OBJCOPY_FOR_TARGET="${HOST_PREFIX}objcopy"
+
+export ac_cv_path_STRIP_FOR_TARGET="${HOST_PREFIX}strip"
+export STRIP_FOR_TARGET="${HOST_PREFIX}strip"
+
+export ac_cv_path_STRINGS_FOR_TARGET="${HOST_PREFIX}strings"
+export STRINGS_FOR_TARGET="${HOST_PREFIX}strings"
+
+export ac_cv_path_READELF_FOR_TARGET="${HOST_PREFIX}readelf"
+export READELF_FOR_TARGET="${HOST_PREFIX}readelf"
+
+do_configure() {
+ ${S}/configure --verbose --prefix=${prefix} --target=${GNU_TARGET_ARCH}-xen-elf --enable-newlib-io-long-long --disable-multilib
+ find ${S} -type f | xargs perl -i.bak -pe 's/\b_(tzname|daylight|timezone)\b/$1/g'
+}
+
+do_compile() {
+ ${MAKE}
+}
+
+do_install() {
+ ${MAKE} DESTDIR=${D} install
+ rm -rf ${D}/cross-root-${GNU_TARGET_ARCH}/share
+ rm -rf ${D}/cross-root-${GNU_TARGET_ARCH}/info
+ install -m 644 `${HOST_PREFIX}gcc -print-file-name=include`/stdarg.h -t ${D}/${includedir}
+ install -m 644 `${HOST_PREFIX}gcc -print-file-name=include`/stddef.h -t ${D}/${includedir}
+ install -m 644 `${HOST_PREFIX}gcc -print-file-name=include`/stdbool.h -t ${D}/${includedir}
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/newlib_1.16.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/newlib_1.16.0.bb
new file mode 100644
index 000000000..60672bea0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/newlib_1.16.0.bb
@@ -0,0 +1,21 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "Newlib is a C library intended for use on embedded systems."
+HOMEPAGE = "http://sourceware.org/newlib"
+LICENSE = "GPLv2 & LGPLv3 & GPLv3 & LGPLv2"
+LIC_FILES_CHKSUM = "file://COPYING.NEWLIB;md5=950f50b290e8fcf7a2d3fff61775de9b"
+
+# this is the hash of version tag 1_16_0
+SRCREV_newlib = "07b4b67a88f386ce4716a14e0ff2c2bce992b985"
+SRC_URI = "\
+ git://sourceware.org/git/newlib-cygwin.git;protocol=git;nobranch=1;destsuffix=newlib;name=newlib \
+ file://newlib.patch \
+ file://newlib-chk.patch \
+ file://newlib-stdint-size_max-fix-from-1.17.0.patch \
+"
+
+S="${WORKDIR}/newlib"
+B="${WORKDIR}/build"
+
+require newlib.inc
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/polarssl.inc b/import-layers/meta-virtualization/recipes-extended/xen/polarssl.inc
new file mode 100644
index 000000000..eae95f406
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/polarssl.inc
@@ -0,0 +1,27 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+require stubdom.inc
+
+DEPENDS += "\
+ newlib \
+"
+STUBDOM_CFLAGS += "\
+ -Wno-memset-elt-size \
+ -Wno-implicit-fallthrough \
+"
+
+# nothing to configure
+do_configure[noexec] = "1"
+
+do_compile() {
+ ${MAKE} CC="${HOST_PREFIX}gcc --sysroot=${RECIPE_SYSROOT} ${STUBDOM_CPPFLAGS} ${STUBDOM_CFLAGS}"
+}
+
+do_install() {
+ install -d ${D}${includedir}
+ cp -r -t ${D}${includedir} ${S}/include/polarssl
+
+ install -d ${D}/${libdir}
+ install -m 644 -t ${D}/${libdir} ${S}/library/libpolarssl.a
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/polarssl_1.1.4.bb b/import-layers/meta-virtualization/recipes-extended/xen/polarssl_1.1.4.bb
new file mode 100644
index 000000000..80ea45a76
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/polarssl_1.1.4.bb
@@ -0,0 +1,19 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "PolarSSL (now 'mbed TLS') is an open source, portable, easy to use, readable and flexible SSL library."
+HOMEPAGE = "https://tls.mbed.org"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=751419260aa954499f7abaabaa882bbe"
+
+# git hash for release tag polarssl-1.1.4
+SRCREV_polarssl = "d36da11125a9c85c572a4fdf63e0a25e76d7bb18"
+SRC_URI = "\
+ git://github.com/ARMmbed/mbedtls.git;protocol=https;nobranch=1;destsuffix=polarssl;name=polarssl \
+    file://polarssl.patch \
+"
+
+S="${WORKDIR}/${PN}"
+B="${S}/library"
+
+require polarssl.inc
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp.inc b/import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp.inc
new file mode 100644
index 000000000..ef6473d4b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp.inc
@@ -0,0 +1,37 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+require stubdom.inc
+
+DEPENDS += "\
+ newlib \
+"
+
+do_configure() {
+    # Pass the prefix to configure here; setting it globally would disturb the
+    # OE-level variables (prefix, libdir, includedir, etc.) defined in stubdom.inc
+ CPPFLAGS="-isystem ${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/${GNU_TARGET_ARCH}-xen-elf/include ${STUBDOM_CPPFLAGS}" \
+ CFLAGS="${STUBDOM_CFLAGS}" \
+ CC="${HOST_PREFIX}gcc --sysroot=${RECIPE_SYSROOT}" \
+ ${S}/configure \
+ --disable-shared \
+ --enable-static \
+ --disable-fft \
+ --without-readline \
+ --with-gnu-ld \
+ --prefix=${prefix}/${GNU_TARGET_ARCH}-xen-elf \
+ --libdir=${libdir} \
+ --build=`${HOST_PREFIX}gcc -dumpmachine` \
+ --host=${GNU_TARGET_ARCH}-xen-elf
+
+ sed -i 's/#define HAVE_OBSTACK_VPRINTF 1/\/\/#define HAVE_OBSTACK_VPRINTF 1/' ${S}/config.h
+}
+
+do_compile() {
+ ${MAKE}
+}
+
+do_install() {
+ ${MAKE} DESTDIR=${D} install
+ rm -rf ${D}${prefix}/${GNU_TARGET_ARCH}-xen-elf/share
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp_4.3.2.bb b/import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp_4.3.2.bb
new file mode 100644
index 000000000..d9a2adbd3
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/stubdom-gmp_4.3.2.bb
@@ -0,0 +1,20 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "GMP library for Xen vTPM's."
+HOMEPAGE = "http://gmp"
+LICENSE = "GPLv3"
+LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504 \
+ file://COPYING.LIB;md5=6a6a8e020838b23406c81b19c1d46df6 \
+"
+
+SRC_URI = "\
+ https://gmplib.org/download/gmp/archive/gmp-${PV}.tar.bz2 \
+"
+SRC_URI[md5sum] = "dd60683d7057917e34630b4a787932e8"
+SRC_URI[sha256sum] = "936162c0312886c21581002b79932829aa048cfaf9937c6265aeaa14f1cd1775"
+
+S="${WORKDIR}/gmp-${PV}"
+B="${S}"
+
+require stubdom-gmp.inc
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/stubdom.inc b/import-layers/meta-virtualization/recipes-extended/xen/stubdom.inc
new file mode 100644
index 000000000..bfc675276
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/stubdom.inc
@@ -0,0 +1,152 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+COMPATIBLE_HOST = '(x86_64.*).*-linux'
+
+require xen-arch.inc
+
+# many of the Xen stubdom-related recipes build and package static libraries instead of shared ones
+EXCLUDE_FROM_SHLIBS = "1"
+
+# base set of dependencies to be used for xen stubdom recipes
+DEPENDS += "\
+ lwip \
+ mini-os \
+"
+
+# Unset EVERYTHING from the OE environment.
+# The stubdom builds misbehave when these variables are exported into the environment.
+unset CFLAGS
+unset BUILD_CFLAGS
+unset TARGET_CFLAGS
+unset CFLAGS_FOR_BUILD
+
+unset CPPFLAGS
+unset BUILD_CPPFLAGS
+unset TARGET_CPPFLAGS
+unset CPPFLAGS_FOR_BUILD
+
+unset LDFLAGS
+unset BUILD_LDFLAGS
+unset TARGET_LDFLAGS
+unset LDFLAGS_FOR_BUILD
+
+unset CXXFLAGS
+unset TARGET_CXXFLAGS
+unset BUILD_CXXFLAGS
+unset CXXFLAGS_FOR_BUILD
+
+unset BUILD_CC
+unset BUILD_CPP
+unset BUILD_CXX
+unset BUILD_LD
+unset BUILD_AR
+unset BUILD_AS
+unset BUILD_CCLD
+unset BUILD_FC
+unset BUILD_RANLIB
+unset BUILD_NM
+unset BUILD_STRIP
+unset BUILD_READELF
+unset BUILD_OBJCOPY
+unset BUILD_OBJDUMP
+unset CC
+unset CPP
+unset CXX
+unset LD
+unset AR
+unset AS
+unset NM
+unset RANLIB
+unset STRIP
+unset STRINGS
+unset READELF
+unset OBJCOPY
+unset OBJDUMP
+unset READELF
+unset CCLD
+unset FC
+
+# Provide support to build both 32-bit and 64-bit stubdoms
+python () {
+ gnu_dict = {
+ 'x86_32': 'i686',
+ 'x86_64': 'x86_64',
+ }
+
+ if d.expand('${XEN_TARGET_ARCH}') == 'x86_32':
+ d.setVar("GNU_TARGET_ARCH",gnu_dict[d.expand('${XEN_TARGET_ARCH}')])
+ d.setVar("PACKAGE_ARCH","core2-32")
+ elif d.expand('${XEN_TARGET_ARCH}') == 'x86_64':
+ d.setVar("GNU_TARGET_ARCH",gnu_dict[d.expand('${XEN_TARGET_ARCH}')])
+}
+
+export GNU_TARGET_ARCH
+export XEN_TARGET_ARCH="${@map_xen_arch(d.getVar('TARGET_ARCH'), d)}"
+export XEN_COMPILE_ARCH="${@map_xen_arch(d.getVar('BUILD_ARCH'), d)}"
+
+LWIP_SRCDIR = "${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/lwip"
+MINIOS_SRCDIR = "${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/mini-os"
+
+# Base set of CPPFLAGS, CFLAGS needed for each component used to build MiniOS-based stubdoms
+# LDFLAGS are only needed when linking the stubdoms themselves, so they only appear in stubdom recipes
+# Generic name given because each library uses DEF_, BUILD_, TARGET_, and xxxFLAGS differently
+CPPFLAGS_INCLUDE_DIR = "-isystem ${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/${GNU_TARGET_ARCH}-xen-elf/include"
+
+STUBDOM_CPPFLAGS += "\
+ -isystem ${MINIOS_SRCDIR}/include \
+ -D__MINIOS__ \
+ -DHAVE_LIBC \
+ -isystem ${MINIOS_SRCDIR}/include/posix \
+ -isystem ${MINIOS_SRCDIR}/include/xen \
+ -isystem ${MINIOS_SRCDIR}/include/x86 \
+ -isystem ${MINIOS_SRCDIR}/include/x86/${XEN_TARGET_ARCH} \
+ -U __linux__ \
+ -U __FreeBSD__ \
+ -U __sun__ \
+ -nostdinc \
+ ${CPPFLAGS_INCLUDE_DIR} \
+ -isystem ${LWIP_SRCDIR}/include \
+ -isystem ${LWIP_SRCDIR}/include/ipv4 \
+"
+
+STUBDOM_CFLAGS += "\
+ -mno-red-zone \
+ -O1 \
+ -fno-omit-frame-pointer \
+ -m64 \
+ -fno-reorder-blocks \
+ -fno-asynchronous-unwind-tables \
+ -DBUILD_ID \
+ -fno-strict-aliasing \
+ -std=gnu99 \
+ -Wall \
+ -Wstrict-prototypes \
+ -Wdeclaration-after-statement \
+ -Wno-unused-but-set-variable \
+ -Wno-unused-local-typedefs \
+ -fno-stack-protector \
+ -fno-exceptions \
+"
+
+STUBDOM_LDFLAGS = "\
+ -nostdlib \
+ -L${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/${GNU_TARGET_ARCH}-xen-elf/lib \
+"
+
+# Redefine these for stubdom-related builds. The prefix used by newlib is the
+# starting point, and beyond that we do not want to cross-contaminate
+# stubdom-related recipes with headers and libraries found in the OE-defined
+# locations. (A worked example of the resulting paths follows this file's diff.)
+export prefix="/cross-root-${GNU_TARGET_ARCH}"
+export includedir="${prefix}/${GNU_TARGET_ARCH}-xen-elf/include"
+export libdir="${prefix}/${GNU_TARGET_ARCH}-xen-elf/lib"
+export libexecdir="${libdir}"
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+# Typically defined in Xen and MiniOS .mk files that aren't sourced/read here;
+# set explicitly so the Makefiles pick up the expected values and paths
+export debug="y"
+export stubdom="y"
+export XEN_OS="MiniOS"
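
To make the arch mapping and path redefinitions above concrete, here is a worked expansion for a 64-bit x86 build, derived from the python () block and the exported prefix/includedir/libdir in this file (illustrative only; the x86_32 case maps to i686 and PACKAGE_ARCH core2-32):

    XEN_TARGET_ARCH = "x86_64"
    GNU_TARGET_ARCH = "x86_64"
    prefix          = "/cross-root-x86_64"
    includedir      = "/cross-root-x86_64/x86_64-xen-elf/include"
    libdir          = "/cross-root-x86_64/x86_64-xen-elf/lib"
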
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator.inc b/import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator.inc
new file mode 100644
index 000000000..f58a27639
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator.inc
@@ -0,0 +1,37 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+require stubdom.inc
+
+DEPENDS += "\
+ cmake-native \
+ newlib \
+ stubdom-gmp \
+"
+
+# nothing to configure
+do_configure[noexec] = "1"
+
+export CMAKE_C_FLAGS = "\
+ -std=c99 \
+ -DTPM_NO_EXTERN \
+ ${STUBDOM_CPPFLAGS} \
+ ${STUBDOM_CFLAGS} \
+ -Wno-declaration-after-statement \
+ -Wno-implicit-fallthrough \
+"
+
+do_compile() {
+ CC="${HOST_PREFIX}gcc --sysroot=${RECIPE_SYSROOT}" cmake .. -DCMAKE_C_FLAGS:STRING="${CMAKE_C_FLAGS}"
+ ${MAKE} VERBOSE=1 tpm_crypto tpm
+}
+
+do_install() {
+ install -d ${D}/${libdir}
+ install -m 644 -t ${D}/${libdir} ${B}/crypto/libtpm_crypto.a
+ install -m 644 -t ${D}/${libdir} ${B}/tpm/libtpm.a
+
+ install -D -m 644 -t ${D}/${includedir}/tpm-emulator/build ${S}/build/config.h
+ install -D -m 644 -t ${D}/${includedir}/tpm-emulator/crypto ${S}/crypto/*.h
+ install -D -m 644 -t ${D}/${includedir}/tpm-emulator/tpm ${S}/tpm/*.h
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator_0.7.4.bb b/import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator_0.7.4.bb
new file mode 100644
index 000000000..f6269297a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/tpm-emulator_0.7.4.bb
@@ -0,0 +1,26 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "TPM Emulator"
+HOMEPAGE = "http://xenbits.xen.org/xen-extfiles"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://README;md5=eeabd77cf8fd8a8bc42983884cb09863"
+
+SRC_URI = "\
+ http://xenbits.xen.org/xen-extfiles/tpm_emulator-${PV}.tar.gz;name=tpm-emulator \
+ file://tpmemu-0.7.4.patch \
+ file://vtpm-bufsize.patch \
+ file://vtpm-locality.patch \
+ file://vtpm-parent-sign-ek.patch \
+ file://vtpm-deepquote.patch \
+ file://vtpm-deepquote-anyloc.patch \
+ file://vtpm-cmake-Wextra.patch \
+ file://vtpm-implicit-fallthrough.patch \
+"
+SRC_URI[tpm-emulator.md5sum] = "e26becb8a6a2b6695f6b3e8097593db8"
+SRC_URI[tpm-emulator.sha256sum] = "4e48ea0d83dd9441cc1af04ab18cd6c961b9fa54d5cbf2c2feee038988dea459"
+
+S="${WORKDIR}/tpm_emulator-${PV}"
+B="${S}/build"
+
+require tpm-emulator.inc
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm.inc b/import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm.inc
new file mode 100644
index 000000000..a94c7503d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm.inc
@@ -0,0 +1,111 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+require stubdom.inc
+
+DEPENDS = "\
+ newlib \
+ lwip \
+ mini-os \
+ polarssl \
+ stubdom-gmp \
+ tpm-emulator \
+"
+# These were unset by stubdom.inc to allow us to scope them per recipe
+export CC="${HOST_PREFIX}gcc --sysroot=${RECIPE_SYSROOT}"
+export CCLD="${HOST_PREFIX}gcc --sysroot=${RECIPE_SYSROOT}"
+export CXX="${HOST_PREFIX}g++ --sysroot=${RECIPE_SYSROOT}"
+export CPP="${HOST_PREFIX}gcc -E --sysroot=${RECIPE_SYSROOT}"
+export LD="${HOST_PREFIX}ld --sysroot=${RECIPE_SYSROOT}"
+export LD_LTO="${HOST_PREFIX}ld --sysroot=${RECIPE_SYSROOT}"
+export AS="${HOST_PREFIX}as"
+export AR="${HOST_PREFIX}ar"
+export NM="${HOST_PREFIX}nm"
+export RANLIB="${HOST_PREFIX}ranlib"
+export OBJDUMP="${HOST_PREFIX}objdump"
+export OBJCOPY="${HOST_PREFIX}objcopy"
+export STRIP="${HOST_PREFIX}strip"
+export STRINGS="${HOST_PREFIX}strings"
+export READELF="${HOST_PREFIX}readelf"
+
+
+# Required by the stubdom configuration steps below: do_configure iterates over these targets
+export STUBDOM_TARGETS="vtpm vtpmmgr"
+
+VTPM_CPPFLAGS = "\
+ -I${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/${GNU_TARGET_ARCH}-xen-elf/include/tpm-emulator/build \
+ -I${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/${GNU_TARGET_ARCH}-xen-elf/include/tpm-emulator/crypto \
+ -I${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/${GNU_TARGET_ARCH}-xen-elf/include/tpm-emulator/tpm \
+ -I${RECIPE_SYSROOT}/cross-root-${GNU_TARGET_ARCH}/${GNU_TARGET_ARCH}-xen-elf/include/tpm-emulator \
+"
+
+# The includes from this Xen directory are not in the MiniOS repo, although they probably should be.
+STUBDOM_CPPFLAGS += "-isystem ${B}/include/"
+
+do_configure() {
+
+    # GCC 7 fails to link header-defined inlines that are not declared 'static'
+    # or 'extern'. This appears to be fixed in Xen 4.10.0+, so check the version
+    # of the Xen source to determine whether the inline declarations need modifying.
+ #
+    # 'sed' to split the semicolon-separated versions onto separate lines
+ # 'sort -V' to natural sort version numbers
+ # 'head -n1' to capture the first line of output from sort command
+
+ if [ "${PV}" = "$(echo "${PV};4.9.999" | sed 's/;/\n/' | sort -V | head -n1)" ]; then
+ sed -i "s/^inline/static inline/g" ${B}/vtpmmgr/*.h
+ fi
+
+ for i in AR AS NM RANLIB OBJDUMP OBJCOPY STRIP STRINGS READELF CXX LD LD_LTO CC CPP; do
+ sed -i "s/^\($i\s\s*\).*=/\1?=/" ${MINIOS_SRCDIR}/Config.mk
+ done
+
+ # replicate the TARGETS_MINIOS target in xen/stubdom/Makefile
+ for i in ${STUBDOM_TARGETS}; do
+ [ -d ${B}/mini-os-${XEN_TARGET_ARCH}-$i ] ||
+ for j in $(cd ${MINIOS_SRCDIR} ; find . -type d) ; do \
+ mkdir -p ${B}/mini-os-${XEN_TARGET_ARCH}-$i/$j; \
+ done
+ done
+}
+
+do_compile() {
+ ${MAKE} MINIOS_CONFIG="${B}/vtpm/minios.cfg" CONFIG_FILE="${B}/vtpm-minios-config.mk" DESTDIR= -C ${MINIOS_SRCDIR} config
+ CPPFLAGS="`cat ${B}/vtpm-minios-config.mk` ${STUBDOM_CPPFLAGS} ${VTPM_CPPFLAGS}" CFLAGS="${STUBDOM_CFLAGS}" ${MAKE} -C ${B}/vtpm
+ DEF_CPPFLAGS="${STUBDOM_CPPFLAGS}" \
+ DEF_CFLAGS="${STUBDOM_CFLAGS}" \
+ DEF_LDFLAGS="${STUBDOM_LDFLAGS}" \
+ MINIOS_CONFIG="${B}/vtpm/minios.cfg" \
+ ${MAKE} -C ${MINIOS_SRCDIR} \
+ OBJ_DIR=${B}/mini-os-${XEN_TARGET_ARCH}-vtpm \
+ APP_OBJS="${B}/vtpm/vtpm.a" \
+ APP_LDLIBS="-ltpm -ltpm_crypto -lgmp -lpolarssl"
+
+ ${MAKE} MINIOS_CONFIG="${B}/vtpmmgr/minios.cfg" CONFIG_FILE="${B}/vtpmmgr-minios-config.mk" DESTDIR= -C ${MINIOS_SRCDIR} config
+ CPPFLAGS="`cat ${B}/vtpmmgr-minios-config.mk` ${STUBDOM_CPPFLAGS}" CFLAGS="${STUBDOM_CFLAGS}" ${MAKE} -C ${B}/vtpmmgr
+ DEF_CPPFLAGS="${STUBDOM_CPPFLAGS}" \
+ DEF_CFLAGS="${STUBDOM_CFLAGS}" \
+ DEF_LDFLAGS="${STUBDOM_LDFLAGS}" \
+ MINIOS_CONFIG="${B}/vtpmmgr/minios.cfg" \
+ ${MAKE} -C ${MINIOS_SRCDIR} \
+ OBJ_DIR=${B}/mini-os-${XEN_TARGET_ARCH}-vtpmmgr \
+ APP_OBJS="${B}/vtpmmgr/vtpmmgr.a" \
+ APP_LDLIBS="-lm -lpolarssl"
+}
+
+PACKAGES = "\
+ ${PN}-vtpm-stubdom \
+ ${PN}-vtpmmgr-stubdom \
+"
+FILES_${PN}-vtpm-stubdom="\
+ ${libdir}/xen/boot/vtpm-stubdom.gz \
+"
+
+FILES_${PN}-vtpmmgr-stubdom="\
+ ${libdir}/xen/boot/vtpmmgr-stubdom.gz \
+"
+
+do_install() {
+ install -m 644 -D ${B}/mini-os-${XEN_TARGET_ARCH}-vtpm/mini-os.gz ${D}${libdir}/xen/boot/vtpm-stubdom.gz
+ install -m 644 -D ${B}/mini-os-${XEN_TARGET_ARCH}-vtpmmgr/mini-os.gz ${D}${libdir}/xen/boot/vtpmmgr-stubdom.gz
+}
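
The two stubdom images installed above are booted as regular xl guests. A minimal domain configuration sketch for the vTPM domain, adapted from the upstream Xen vTPM documentation; the kernel path, disk image and uuid below are placeholders, with the image living wherever ${PN}-vtpm-stubdom installs it on the target:

    # vtpm.cfg -- sketch only, not part of this change
    name   = "vtpm"
    kernel = "<libdir>/xen/boot/vtpm-stubdom.gz"
    memory = 8
    disk   = ["file:/var/vtpm-disk.img,hda,w"]
    vtpm   = ["backend=vtpmmgr,uuid=<vtpm-uuid>"]

The vtpmmgr domain is configured analogously with vtpmmgr-stubdom.gz and its own non-volatile storage disk.
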
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm_4.9.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm_4.9.0.bb
new file mode 100644
index 000000000..c58a02d8e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen-vtpm_4.9.0.bb
@@ -0,0 +1,21 @@
+# Copyright (C) 2017 Kurt Bodiker <kurt.bodiker@braintrust-us.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "MiniOS-based vTPMs for Xen"
+HOMEPAGE = "https://www.xenproject.org"
+LICENSE = "GPLv2 & BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://COPYING;md5=b1ceb1b03a49b202ee6f41ffd1ed0155 \
+ file://vtpm/COPYING;md5=75a98062ab0322ded060d9026a1bda61 \
+"
+
+# git commit hash for Xen's RELEASE-4.9.0 tag
+SRCREV_xen = "c30bf55594a53fae8aae08aabf16fc192faad7da"
+SRC_URI = "\
+ git://xenbits.xen.org/xen.git;protocol=git;nobranch=1;name=xen;subpath=stubdom \
+ git://xenbits.xen.org/xen.git;protocol=git;nobranch=1;name=xen;destsuffix=stubdom/include;subpath=tools/xenstore/include \
+"
+
+S="${WORKDIR}/stubdom"
+B="${S}"
+
+require xen-vtpm.inc
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen.inc b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
index cb314f88b..8dfe589af 100644
--- a/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
@@ -62,15 +62,12 @@ RDEPENDS_${PN} = ""
RDEPENDS_${PN}-base = "\
bash perl xz \
- ${PN}-blktap \
${PN}-console \
- ${PN}-libblktapctl \
${PN}-libxenguest \
${PN}-libxenlight \
${PN}-libxenvchan \
${PN}-libxenctrl \
${PN}-libxlutil \
- ${PN}-libvhd \
${PN}-libxenstat \
${PN}-libxenstore \
${PN}-libfsimage \
@@ -88,7 +85,10 @@ RDEPENDS_${PN}-base = "\
RDEPENDS_${PN}-dev = ""
RRECOMMENDS_${PN}-base = " \
+ ${PN}-blktap \
${PN}-libblktap \
+ ${PN}-libblktapctl \
+ ${PN}-libvhd \
${PN}-flask \
${PN}-hvmloader \
${PN}-xenpaging \
@@ -196,6 +196,8 @@ PACKAGES = "\
${PN}-libxenstat-dev \
${PN}-libxenstore \
${PN}-libxenstore-dev \
+ ${PN}-libxentoolcore \
+ ${PN}-libxentoolcore-dev \
${PN}-libxentoollog \
${PN}-libxentoollog-dev \
${PN}-libxenvchan \
@@ -344,6 +346,12 @@ FILES_${PN}-libxenstore-dev = " \
${datadir}/pkgconfig/xenstore.pc \
"
+FILES_${PN}-libxentoolcore = "${libdir}/libxentoolcore.so.*"
+FILES_${PN}-libxentoolcore-dev = " \
+ ${libdir}/libxentoolcore.so \
+ ${datadir}/pkgconfig/xentoolcore.pc \
+ "
+
FILES_${PN}-libxentoollog = "${libdir}/libxentoollog.so.*"
FILES_${PN}-libxentoollog-dev = " \
${libdir}/libxentoollog.so \
@@ -474,6 +482,7 @@ FILES_${PN}-misc = "\
${sbindir}/xenperf \
${sbindir}/xenpm \
${sbindir}/xsview \
+ ${sbindir}/xen-diag \
${sbindir}/xen-tmem-list-parse \
${sbindir}/xen-python-path \
${sbindir}/xen-ringwatch \
@@ -739,8 +748,6 @@ FILES_${PN}-xencommons += "\
${systemd_unitdir}/system/xenconsoled.service \
${systemd_unitdir}/system/xen-init-dom0.service \
${systemd_unitdir}/system/xenstored.service \
- ${systemd_unitdir}/system/xenstored.socket \
- ${systemd_unitdir}/system/xenstored_ro.socket \
${systemd_unitdir}/system/var-lib-xenstored.mount \
"
@@ -780,8 +787,6 @@ SYSTEMD_SERVICE_${PN}-xencommons = " \
xenconsoled.service \
xen-init-dom0.service \
xenstored.service \
- xenstored.socket \
- xenstored_ro.socket \
"
SYSTEMD_SERVICE_${PN}-xendomains = "xendomains.service"
@@ -825,7 +830,7 @@ export LDFLAGS=""
# Yocto injects -mfpmath=sse for some machine types into the CFLAGS which
# conflicts with -mno-sse so instead we strip -mfpmath=sse instead of
# patching the build to be ok with this
-TUNE_CCARGS := "${@oe_filter_out('-mfpmath=sse', '${TUNE_CCARGS}', d)}"
+TUNE_CCARGS := "${@oe.utils.str_filter_out('-mfpmath=sse', '${TUNE_CCARGS}', d)}"
EXTRA_OECONF += " \
--exec-prefix=/usr \
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.10.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.10.0.bb
new file mode 100644
index 000000000..d314b9b6a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.10.0.bb
@@ -0,0 +1,12 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+require xen.inc
+
+SRC_URI = " \
+ https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
+ file://xsa253.patch \
+ "
+
+SRC_URI[md5sum] = "ab9d320d02cb40f6b40506aed1a38d58"
+SRC_URI[sha256sum] = "0262a7023f8b12bcacfb0b25e69b2a63291f944f7683d54d8f33d4b2ca556844"
+
+S = "${WORKDIR}/xen-${PV}"
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb
deleted file mode 100644
index 8e9c8024b..000000000
--- a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb
+++ /dev/null
@@ -1,12 +0,0 @@
-FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
-require xen.inc
-
-SRC_URI = " \
- https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
- file://fix-libxc-xc_dom_arm-missing-initialization.patch \
- "
-
-SRC_URI[md5sum] = "f0a753637630f982dfbdb64121fd71e1"
-SRC_URI[sha256sum] = "cade643fe3310d4d6f97d0c215c6fa323bc1130d7e64d7e2043ffaa73a96f33b"
-
-S = "${WORKDIR}/xen-${PV}"
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.1.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.1.bb
new file mode 100644
index 000000000..5c18bb00a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.1.bb
@@ -0,0 +1,18 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+require xen.inc
+
+SRC_URI = " \
+ https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
+ file://xsa246-4.9.patch \
+ file://0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch \
+ file://0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch \
+ file://xsa248.patch \
+ file://xsa249.patch \
+ file://xsa250.patch \
+ file://xsa251.patch \
+ "
+
+SRC_URI[md5sum] = "8b9d6104694b164d54334194135f7217"
+SRC_URI[sha256sum] = "ecf88b01f44cd8f4ef208af3f999dceb69bdd2a316d88dd9a9535ea7b49ed356"
+
+S = "${WORKDIR}/xen-${PV}"
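
With this change the layer carries recipes for both Xen 4.9.1 and 4.10.0 (the 4.9.0 recipe is removed), so a build can pin the version it wants. A minimal local.conf sketch (the version chosen here is only an example):

    PREFERRED_VERSION_xen = "4.10.0"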