summary refs log tree commit diff stats
path: root/src/usr/hwpf/hwp/build_winkle_images
diff options
context:
space:
mode:
author Thi Tran <thi@us.ibm.com> 2013-03-29 10:58:41 -0500
committer A. Patrick Williams III <iawillia@us.ibm.com> 2013-04-01 10:51:31 -0500
commit 0bb75102df21ce246bfd909225f53d2d0ac91fed (patch)
tree 80b7881766125031d0cbb673b38af52f7067959a /src/usr/hwpf/hwp/build_winkle_images
parent 467ae10a804451a843409e6b94a3c0108c083939 (diff)
downloadtalos-hostboot-0bb75102df21ce246bfd909225f53d2d0ac91fed.tar.gz
talos-hostboot-0bb75102df21ce246bfd909225f53d2d0ac91fed.zip
TULETA Bring Up - Memory HW procedures 03/29/2013
Change-Id: I528f51d33b9ff6addc81ae203900dcf47df5639b Reviewed-on: http://gfw160.austin.ibm.com:8080/gerrit/3801 Tested-by: Jenkins Server Reviewed-by: Van H. Lee <vanlee@us.ibm.com> Reviewed-by: A. Patrick Williams III <iawillia@us.ibm.com>
Diffstat (limited to 'src/usr/hwpf/hwp/build_winkle_images')
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pba_init.H 87
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pfet_init.C 20
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pm.H 35
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/pgp_pba.h 49
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_delta_scan_rw.h 3
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_image_help_base.C 511
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_ring_identification.c 194
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pgas.h 66
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_bitmanip.H 73
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline.h 79
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline_assembler.c 49
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.c 824
-rw-r--r-- src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.h 29
13 files changed, 1276 insertions, 743 deletions
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pba_init.H b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pba_init.H
index 7019b2cc4..dc92fcc4b 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pba_init.H
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pba_init.H
@@ -5,7 +5,7 @@
/* */
/* IBM CONFIDENTIAL */
/* */
-/* COPYRIGHT International Business Machines Corp. 2012 */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
/* */
/* p1 */
/* */
@@ -30,20 +30,21 @@
// *! OWNER NAME: Klaus P. Gungl Email: kgungl@de.ibm.com
// *!
// *! General Description:
-// *!
+// *!
// *! include file for pba_init with constants, definitions, prototypes
// *!
//------------------------------------------------------------------------------
-//
+//
-#ifndef _PROC_PBAINIT_H_
-#define _PROC_PBAINIT_H_
+#ifndef _P8_PBAINIT_H_
+#define _P8_PBAINIT_H_
#include "p8_scom_addresses.H"
typedef fapi::ReturnCode (*p8_pba_init_FP_t) (const fapi::Target& , uint64_t );
-// constant definitions for valid command scope. LIMIT is used by setup routine for plausibility checking.
+// constant definitions for valid command scope. LIMIT is used by setup routine
+// for plausibility checking.
#define PBA_CMD_SCOPE_NODAL 0x00
#define PBA_CMD_SCOPE_GROUP 0x01
@@ -53,50 +54,64 @@ typedef fapi::ReturnCode (*p8_pba_init_FP_t) (const fapi::Target& , uint64_t );
#define PBA_CMD_SCOPE_FOREIGN1 0x05
#define PBA_CMD_SCOPE_LIMIT 0x06
-enum cmd_scope_t
+enum cmd_scope_t
{
- CMD_SCOPE_NODAL,
- CMD_SCOPE_GROUP,
- CMD_SCOPE_SYSTEM,
- CMD_SCOPE_RGP,
- CMD_SCOPE_FOREIGN0,
- CMD_SCOPE_FOREIGN1
+ CMD_SCOPE_NODAL,
+ CMD_SCOPE_GROUP,
+ CMD_SCOPE_SYSTEM,
+ CMD_SCOPE_RGP,
+ CMD_SCOPE_FOREIGN0,
+ CMD_SCOPE_FOREIGN1
};
// enum cmd_scope_type {NODAL, GROUP, SYSTEM, RGP, FOREIGN0, FOREIGN1 };
-// addresses of PBA and PBABAR, actually a duplicate of definitions in "p8_scom_addresses.H" but here an array to be indexed.
-const uint64_t PBA_BARs[4] =
+// addresses of PBA and PBABAR, actually a duplicate of definitions in
+// "p8_scom_addresses.H" but here an array to be indexed.
+const uint64_t PBA_BARs[4] =
{
- PBA_BAR0_0x02013F00,
- PBA_BAR1_0x02013F01,
- PBA_BAR2_0x02013F02,
+ PBA_BAR0_0x02013F00,
+ PBA_BAR1_0x02013F01,
+ PBA_BAR2_0x02013F02,
PBA_BAR3_0x02013F03
};
-const uint64_t PBA_BARMSKs[4] =
+const uint64_t PBA_BARMSKs[4] =
{
- PBA_BARMSK0_0x02013F04,
- PBA_BARMSK1_0x02013F05,
- PBA_BARMSK2_0x02013F06,
+ PBA_BARMSK0_0x02013F04,
+ PBA_BARMSK1_0x02013F05,
+ PBA_BARMSK2_0x02013F06,
PBA_BARMSK3_0x02013F07
};
-const uint64_t PBA_SLVCTLs[4] =
+const uint64_t PBA_SLVCTLs[4] =
+{
+ PBA_SLVCTL0_0x00064004,
+ PBA_SLVCTL1_0x00064005,
+ PBA_SLVCTL2_0x00064006,
+ PBA_SLVCTL3_0x00064007
+};
+
+const uint64_t PBA_SLVRESETs[4] =
{
- PBA_SLVCTL0_0x00064004,
- PBA_SLVCTL1_0x00064005,
- PBA_SLVCTL2_0x00064006,
- PBA_SLVCTL3_0x00064007};
+ 0x8000000000000000ull,
+ 0xA000000000000000ull,
+ 0xC000000000000000ull,
+ 0xE000000000000000ull
+};
+
+// Maximum number of Polls for PBA slave reset
+#define MAX_PBA_RESET_POLLS 16
+#define PBA_RESET_POLL_DELAY 1 // in microseconds
// bar mask is valid for bits 23 to 43, in a 64bit value this is
-// 1 2 3 4 5 6
+// 1 2 3 4 5 6
// 0123456789012345678901234567890123456789012345678901234567890123
// 0000000000000000000000011111111111111111111100000000000000000000
// 0 0 0 0 0 1 F F F F F 0 0 0 0 0
// 0000000000000011111111111111111111111111111100000000000000000000
// 0 0 0 3 F F F F F F F 0 0 0 0 0
-// 0123456701234567
+// 0123456701234567
#define BAR_MASK_LIMIT 0x000001FFFFF00000ull
#define BAR_ADDR_LIMIT 0x0003FFFFFFF00000ull
@@ -140,7 +155,7 @@ typedef union {
typedef struct {
bar_reg_type bar_reg;
- barmsk_reg_type barmsk_reg;
+ barmsk_reg_type barmsk_reg;
} struct_pba_bar_init_type;
@@ -151,7 +166,7 @@ typedef struct {
unsigned short reserved_2:10;
unsigned long addr:30;
unsigned long reserved_3:20;
- };
+ };
struct struct_pba_barmsk{
unsigned long reserved_1:23;
unsigned long mask:21;
@@ -182,7 +197,7 @@ typedef union pbaxcfg_typ{
unsigned long reserved_2 :23 ;
} fields;
} pbaxcfg_t;
-
+
@@ -190,11 +205,11 @@ typedef union pbaxcfg_typ{
// Function prototypes
// ----------------------------------------------------------------------
-extern "C"
+extern "C"
{
-fapi::ReturnCode
-p8_pba_init (const fapi::Target& i_target,
+fapi::ReturnCode
+p8_pba_init (const fapi::Target& i_target,
uint64_t mode
);
@@ -203,6 +218,6 @@ p8_pba_init (const fapi::Target& i_target,
-#endif // _PROC_PBAINITQ_H_
+#endif // _P8_PBAINITQ_H_
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pfet_init.C b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pfet_init.C
index 0bf8aca9d..a3eeb412d 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pfet_init.C
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pfet_init.C
@@ -20,7 +20,7 @@
/* Origin: 30 */
/* */
/* IBM_PROLOG_END_TAG */
-// $Id: p8_pfet_init.C,v 1.2 2013/01/29 19:39:45 jmcgill Exp $
+// $Id: p8_pfet_init.C,v 1.3 2013/03/18 17:58:33 pchatnah Exp $
// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/fapi/p8_pfet_init.C,v $
//------------------------------------------------------------------------------
// *! (C) Copyright International Business Machines Corp. 2011
@@ -266,7 +266,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_FREQ_PROC_REFCLOCK");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -277,7 +277,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERUP_CORE_DELAY0");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -288,7 +288,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERUP_CORE_DELAY1");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -299,7 +299,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERDOWN_CORE_DELAY0");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -310,7 +310,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERDOWN_CORE_DELAY1");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -321,7 +321,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERUP_ECO_DELAY0");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -333,7 +333,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERUP_ECO_DELAY1");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -343,7 +343,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERDOWN_ECO_DELAY0");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
@@ -354,7 +354,7 @@ pfet_init(const Target& i_target)
if (l_rc)
{
FAPI_ERR("fapiGetAttribute ATTR_PM_PFET_POWERDOWN_ECO_DELAY1");
- FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
+ // FAPI_SET_HWP_ERROR(l_rc, RC_PROCPM_PFET_GET_ATTR);
break;
}
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pm.H b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pm.H
index 6999f9bf0..c9e54eaa0 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pm.H
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/p8_pm.H
@@ -20,7 +20,7 @@
/* Origin: 30 */
/* */
/* IBM_PROLOG_END_TAG */
-// $Id: p8_pm.H,v 1.2 2012/12/07 20:20:10 stillgs Exp $
+// $Id: p8_pm.H,v 1.3 2013/03/05 23:01:11 stillgs Exp $
// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/fapi/p8_pm.H,v $
//------------------------------------------------------------------------------
// *|
@@ -69,6 +69,39 @@ enum p8_PM_FLOW_MODE {
#endif // _P8_PM_FLOW_MODE
+// Macros to enhance readability yet provide for error handling
+// Assume the error path is to break out of the current loop. If nested loops
+// are employed, the error_flag can be used to break out of the necessary
+// levels.
+#define PUTSCOM(_mi_target, _mi_address, _mi_buffer){ \
+ l_rc = fapiPutScom(_mi_target, _mi_address, _mi_buffer); \
+ if(!l_rc.ok()) \
+ { \
+ FAPI_ERR("PutScom error to address 0x%08llx", _mi_address); \
+ error_flag=true; \
+ break; \
+ } \
+}
+
+#define GETSCOM(_mi_target, _mi_address, _mi_buffer){ \
+ l_rc = fapiGetScom(_mi_target, _mi_address, _mi_buffer); \
+ if(!l_rc.ok()) \
+ { \
+ FAPI_ERR("GetScom error to address 0x%08llx", _mi_address); \
+ error_flag=true; \
+ break; \
+ } \
+}
+
+#define E_RC_CHECK(_mi_e_rc, _mi_l_rc){ \
+ if (e_rc) \
+ { \
+ FAPI_ERR("Error (0x%x) accessing ecmdDataBufferBase", _mi_e_rc);\
+ _mi_l_rc.setEcmdError(_mi_e_rc); \
+ break; \
+ } \
+}
+
} // extern "C"
#endif // _P8_PM_H_
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/pgp_pba.h b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/pgp_pba.h
index 20b59ce62..365be5e6d 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/pgp_pba.h
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/pgp_pba.h
@@ -1,30 +1,29 @@
-/* IBM_PROLOG_BEGIN_TAG
- * This is an automatically generated prolog.
- *
- * $Source: src/usr/hwpf/hwp/build_winkle_images/proc_set_pore_bar/pgp_pba.h $
- *
- * IBM CONFIDENTIAL
- *
- * COPYRIGHT International Business Machines Corp. 2012
- *
- * p1
- *
- * Object Code Only (OCO) source materials
- * Licensed Internal Code Source Materials
- * IBM HostBoot Licensed Internal Code
- *
- * The source code for this program is not published or other-
- * wise divested of its trade secrets, irrespective of what has
- * been deposited with the U.S. Copyright Office.
- *
- * Origin: 30
- *
- * IBM_PROLOG_END_TAG
- */
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_set_pore_bar/pgp_pba.h $ */
+/* */
+/* IBM CONFIDENTIAL */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
+/* */
+/* p1 */
+/* */
+/* Object Code Only (OCO) source materials */
+/* Licensed Internal Code Source Materials */
+/* IBM HostBoot Licensed Internal Code */
+/* */
+/* The source code for this program is not published or otherwise */
+/* divested of its trade secrets, irrespective of what has been */
+/* deposited with the U.S. Copyright Office. */
+/* */
+/* Origin: 30 */
+/* */
+/* IBM_PROLOG_END_TAG */
#ifndef __PGP_PBA_H__
#define __PGP_PBA_H__
-// $Id: pgp_pba.h,v 1.1 2012/08/13 13:04:35 stillgs Exp $
+// $Id: pgp_pba.h,v 1.2 2012/10/05 18:42:15 pchatnah Exp $
/// \file pgp_pba.h
/// \brief PBA unit header. Local and mechanically generated macros.
@@ -32,7 +31,7 @@
/// \todo Add Doxygen grouping to constant groups
//#include "pba_register_addresses.h"
-#include "pba_firmware_registers.h"
+#include "pba_firmware_register.H"
#define POWERBUS_CACHE_LINE_SIZE 128
#define LOG_POWERBUS_CACHE_LINE_SIZE 7
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_delta_scan_rw.h b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_delta_scan_rw.h
index 4699072ea..4e5e8b51e 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_delta_scan_rw.h
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_delta_scan_rw.h
@@ -20,7 +20,7 @@
/* Origin: 30 */
/* */
/* IBM_PROLOG_END_TAG */
-// $Id: p8_delta_scan_rw.h,v 1.38 2013/03/06 18:21:46 cmolsen Exp $
+// $Id: p8_delta_scan_rw.h,v 1.40 2013/03/22 04:12:02 cmolsen Exp $
#define OVERRIDE_OFFSET 8 // Byte offset of forward pointer's addr relative
// to base forward pointer's addr.
#define SIZE_IMAGE_BUF_MAX 5000000 // Max ~50MB image buffer size.
@@ -89,6 +89,7 @@
#define IMGBUILD_INVALID_IMAGE 10 // Invalid image.
#define IMGBUILD_IMAGE_SIZE_MISMATCH 11 // Mismatch between image sizes.
#define IMGBUILD_IMAGE_SIZE_MESS 12 // Messed up image or section sizes.
+#define IMGBUILD_ERR_DECOMPRESSION 13 // Error assoc with decompressing RS4.
#define IMGBUILD_ERR_PORE_INLINE 20 // Pore inline error.
#define IMGBUILD_ERR_PORE_INLINE_ASM 21 // Err assoc w/inline assembler.
#define IMGBUILD_ERR_WF_CREATE 45 // Err assoc w/create_wiggle_flip_prg.
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_image_help_base.C b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_image_help_base.C
index 7c60726b0..a4f1f6009 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_image_help_base.C
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_image_help_base.C
@@ -20,7 +20,7 @@
/* Origin: 30 */
/* */
/* IBM_PROLOG_END_TAG */
-// $Id: p8_image_help_base.C,v 1.6 2013/01/02 03:01:28 cmolsen Exp $
+// $Id: p8_image_help_base.C,v 1.9 2013/03/01 22:23:03 cmolsen Exp $
/*------------------------------------------------------------------------------*/
/* *! TITLE : p8_image_help_base.c */
/* *! DESCRIPTION : Basic helper functions for building and extracting */
@@ -54,132 +54,133 @@ extern "C" {
// DeltaRingLayout, so you can use the non-ptr members to point to values
// in the image.
//
-int get_ring_layout_from_image2( const void *i_imageIn,
- uint32_t i_ddLevel,
- uint8_t i_sysPhase,
- DeltaRingLayout **o_rs4RingLayout,
- void **nextRing)
+int get_ring_layout_from_image2( const void *i_imageIn,
+ uint32_t i_ddLevel,
+ uint8_t i_sysPhase,
+ DeltaRingLayout **o_rs4RingLayout,
+ void **nextRing)
{
- uint32_t rc=0, rcLoc=0;
- uint8_t bRingFound=0, bRingEOS=0;
- DeltaRingLayout *thisRingLayout, *nextRingLayout; //Pointers into memory mapped image. DO NOT CHANGE MEMBERS!
- uint32_t sizeInitf;
- SbeXipSection hostSection;
- void *initfHostAddress0;
-
- SBE_XIP_ERROR_STRINGS(g_errorStrings);
+ uint32_t rc=0, rcLoc=0;
+ uint8_t bRingFound=0, bRingEOS=0;
+ DeltaRingLayout *thisRingLayout, *nextRingLayout; //Pointers into memory mapped image. DO NOT CHANGE MEMBERS!
+ uint32_t sizeRings;
+ SbeXipSection hostSection;
+ void *ringsHostAddress0;
+
+ SBE_XIP_ERROR_STRINGS(g_errorStrings);
- // Always first get the .initf stats from the TOC:
- // - .initf host address offset and
- // - .initf size
- //
+ // Always first get the .rings stats from the TOC:
+ // - .rings host address offset and
+ // - .rings size
+ //
rc = sbe_xip_get_section( i_imageIn, SBE_XIP_SECTION_RINGS, &hostSection);
if (rc) {
MY_INF("ERROR : sbe_xip_get_section() failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
- MY_INF("Probable cause:");
- MY_INF("\tThe section (=SBE_XIP_SECTION_RINGS=%i) was not found.",SBE_XIP_SECTION_RINGS);
- return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
+ MY_INF("Probable cause:");
+ MY_INF("\tThe section (=SBE_XIP_SECTION_RINGS=%i) was not found.",SBE_XIP_SECTION_RINGS);
+ return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
}
if (hostSection.iv_offset==0) {
- MY_INF("INFO : No ring data exists for the section ID = SBE_XIP_SECTION_RINGS (ID=%i).",SBE_XIP_SECTION_RINGS);
- return DSLWB_RING_SEARCH_NO_MATCH; // Implies exhaust search as well.
+ MY_INF("INFO : No ring data exists for the section ID = SBE_XIP_SECTION_RINGS (ID=%i).",SBE_XIP_SECTION_RINGS);
+ return DSLWB_RING_SEARCH_NO_MATCH; // Implies exhaust search as well.
}
- initfHostAddress0 = (void*)((uintptr_t)i_imageIn + hostSection.iv_offset);
- sizeInitf = hostSection.iv_size;
+ ringsHostAddress0 = (void*)((uintptr_t)i_imageIn + hostSection.iv_offset);
+ sizeRings = hostSection.iv_size;
- // On first call, get the base offset to the .initf section.
- // On subsequent calls, we're into the search for ddLevel and sysPhase, so use nextRing instead.
- //
- if (*nextRing==NULL)
- nextRingLayout = (DeltaRingLayout*)initfHostAddress0;
- else
- nextRingLayout = (DeltaRingLayout*)*nextRing;
+ // On first call, get the base offset to the .rings section.
+ // On subsequent calls, we're into the search for ddLevel and sysPhase, so use nextRing instead.
+ //
+ if (*nextRing==NULL)
+ nextRingLayout = (DeltaRingLayout*)ringsHostAddress0;
+ else
+ nextRingLayout = (DeltaRingLayout*)*nextRing;
- MY_DBG("initfHostAddress0 = 0x%016llx",(uint64_t)initfHostAddress0);
- MY_DBG("sizeInitf = %i", sizeInitf);
- MY_DBG("nextRingLayout = 0x%016llx",(uint64_t)nextRingLayout);
-
- // Populate the output RS4 ring BE layout structure as well as local structure in host LE format where needed.
- // Note! Entire memory content is in BE format. So we do LE conversions where needed.
- //
- bRingFound = 0;
- bRingEOS = 0;
-
- // SEARCH loop: Parse ring blocks successively until we find a ring that matches:
- // ddLevel == i_ddLevel
- // sysPhase == i_sysPhase
- //
- while (!bRingFound && !bRingEOS) {
- thisRingLayout = nextRingLayout;
- MY_DBG("Next backItemPtr = 0x%016llx",myRev64(thisRingLayout->backItemPtr));
- MY_DBG("Next ddLevel = 0x%02x",myRev32(thisRingLayout->ddLevel));
- MY_DBG("Next sysPhase = %i",thisRingLayout->sysPhase);
- MY_DBG("Next override = %i",thisRingLayout->override);
- MY_DBG("Next reserved1 = %i",thisRingLayout->reserved1);
- MY_DBG("Next reserved2 = %i",thisRingLayout->reserved2);
-
- if (myRev32(thisRingLayout->ddLevel)==i_ddLevel) { // Is there a non-specific DD level, like for sys phase?
- if ((thisRingLayout->sysPhase==0 && i_sysPhase==0) ||
- (thisRingLayout->sysPhase==1 && i_sysPhase==1) ||
- (thisRingLayout->sysPhase==2 && (i_sysPhase==0 || i_sysPhase==1))) {
- bRingFound = 1;
- MY_DBG("\tRing match found!");
- }
- }
- nextRingLayout = (DeltaRingLayout*)((uintptr_t)thisRingLayout + myRev32(thisRingLayout->sizeOfThis));
- *nextRing = (void*)nextRingLayout;
- if (nextRingLayout>=(DeltaRingLayout*)((uintptr_t)initfHostAddress0+sizeInitf)) {
- bRingEOS = 1;
- *nextRing = NULL;
- MY_DBG("\tRing search exhausted!");
- }
-
- } // End of SEARCH.
+ MY_DBG("ringsHostAddress0 = 0x%016llx",(uint64_t)ringsHostAddress0);
+ MY_DBG("sizeRings = %i", sizeRings);
+ MY_DBG("nextRingLayout = 0x%016llx",(uint64_t)nextRingLayout);
+
+ // Populate the output RS4 ring BE layout structure as well as local structure in host LE format where needed.
+ // Note! Entire memory content is in BE format. So we do LE conversions where needed.
+ //
+ bRingFound = 0;
+ bRingEOS = 0;
+
+ // SEARCH loop: Parse ring blocks successively until we find a ring that matches:
+ // ddLevel == i_ddLevel
+ // sysPhase == i_sysPhase
+ //
+ while (!bRingFound && !bRingEOS) {
+ thisRingLayout = nextRingLayout;
+ MY_DBG("Next backItemPtr = 0x%016llx",myRev64(thisRingLayout->backItemPtr));
+ MY_DBG("Next ddLevel = 0x%02x",myRev32(thisRingLayout->ddLevel));
+ MY_DBG("Next sysPhase = %i",thisRingLayout->sysPhase);
+ MY_DBG("Next override = %i",thisRingLayout->override);
+ MY_DBG("Next reserved1 = %i",thisRingLayout->reserved1);
+ MY_DBG("Next reserved2 = %i",thisRingLayout->reserved2);
+
+ if (myRev32(thisRingLayout->ddLevel)==i_ddLevel) { // Is there a non-specific DD level, like for sys phase?
+ if ((thisRingLayout->sysPhase==0 && i_sysPhase==0) ||
+ (thisRingLayout->sysPhase==1 && i_sysPhase==1) ||
+ (thisRingLayout->sysPhase==2 && (i_sysPhase==0 || i_sysPhase==1))) {
+ bRingFound = 1;
+ MY_DBG("\tRing match found!");
+ }
+ }
+ nextRingLayout = (DeltaRingLayout*)((uintptr_t)thisRingLayout + myRev32(thisRingLayout->sizeOfThis));
+ *nextRing = (void*)nextRingLayout;
+ if (nextRingLayout>=(DeltaRingLayout*)((uintptr_t)ringsHostAddress0+sizeRings)) {
+ bRingEOS = 1;
+ *nextRing = NULL;
+ MY_DBG("\tRing search exhausted!");
+ }
+
+ } // End of SEARCH.
- if (bRingFound) {
- if (bRingEOS)
- rcLoc = DSLWB_RING_SEARCH_EXHAUST_MATCH;
- else
- rcLoc = DSLWB_RING_SEARCH_MATCH;
- }
- else {
- *nextRing = NULL;
- if (bRingEOS)
- return DSLWB_RING_SEARCH_NO_MATCH; // Implies exhaust search as well.
- else {
- MY_INF("Messed up ring search. Check code and .rings content. Returning nothing.");
- return DSLWB_RING_SEARCH_MESS;
- }
- }
+ if (bRingFound) {
+ if (bRingEOS)
+ rcLoc = DSLWB_RING_SEARCH_EXHAUST_MATCH;
+ else
+ rcLoc = DSLWB_RING_SEARCH_MATCH;
+ }
+ else {
+ *nextRing = NULL;
+ if (bRingEOS)
+ return DSLWB_RING_SEARCH_NO_MATCH; // Implies exhaust search as well.
+ else {
+ MY_INF("Messed up ring search. Check code and .rings content. Returning nothing.");
+ return DSLWB_RING_SEARCH_MESS;
+ }
+ }
*o_rs4RingLayout = thisRingLayout;
- // Check that the ring layout structure in the memory is 8-byte aligned. This must be so because:
- // - The entryOffset address must be on an 8-byte boundary because the start of the .initf ELF section must
- // be 8-byte aligned AND because the rs4Delta member is the last member and which must itself be 8-byte aligned.
- // - These two things together means that both the beginning and end of the delta ring layout must be 8-byte
- // aligned, and thus the whole block,i.e. sizeOfThis, must be 8-byte aligned.
- // Also check that the RS4 delta ring is 8-byte aligned.
- // Also check that the RS4 launcher is 8-byte aligned.
- //
- if (((uintptr_t)thisRingLayout-(uintptr_t)i_imageIn)%8 ||
- myRev32(thisRingLayout->sizeOfThis)%8 ||
- myRev64(thisRingLayout->entryOffset)%8 ) {
- MY_INF("Ring block or ring code section is not 8-byte aligned:");
- MY_INF(" thisRingLayout-imageIn = %i",(uintptr_t)thisRingLayout-(uintptr_t)i_imageIn);
- MY_INF(" thisRingLayout->sizeOfThis = %i",myRev32(thisRingLayout->sizeOfThis));
- MY_INF(" thisRingLayout->entryOffset = %i",(uint32_t)myRev64(thisRingLayout->entryOffset));
- return IMGBUILD_ERR_MISALIGNED_RING_LAYOUT;
- }
+ // Check that the ring layout structure in the memory is 8-byte aligned:
+ // - The entryOffset address must be on an 8-byte boundary because the start of the
+ // .rings section must be 8-byte aligned AND because the rs4Delta member is the
+ // last member and which must itself be 8-byte aligned. These two things together
+ // means that both the beginning and end of the delta ring layout must be 8-byte
+ // aligned, and thus the whole block,i.e. sizeOfThis, must be 8-byte aligned.
+ // Also check that the RS4 delta ring is 8-byte aligned.
+ // Also check that the RS4 launcher is 8-byte aligned.
+ //
+ if (((uintptr_t)thisRingLayout-(uintptr_t)i_imageIn)%8 ||
+ myRev32(thisRingLayout->sizeOfThis)%8 ||
+ myRev64(thisRingLayout->entryOffset)%8 ) {
+ MY_INF("Ring block or ring code section is not 8-byte aligned:");
+ MY_INF(" thisRingLayout-imageIn = 0x%08x",(uint32_t)((uintptr_t)thisRingLayout-(uintptr_t)i_imageIn));
+ MY_INF(" thisRingLayout->sizeOfThis = 0x%08x",myRev32(thisRingLayout->sizeOfThis));
+ MY_INF(" thisRingLayout->entryOffset = 0x%016llx",(uint64_t)myRev64(thisRingLayout->entryOffset));
+ return IMGBUILD_ERR_MISALIGNED_RING_LAYOUT;
+ }
- if (*nextRing > (void*)((uintptr_t)initfHostAddress0 + sizeInitf)) {
- MY_INF("Book keeping got messed up during .initf search. Initf section does not appear aligned.");
- MY_INF("initfHostAddress0+sizeInitf = 0x%016llx",(uint64_t)initfHostAddress0+sizeInitf);
- MY_INF("nextRing = %i",*(uint32_t*)nextRing);
- MY_INF("Continuing...");
- }
+ if (*nextRing > (void*)((uintptr_t)ringsHostAddress0 + sizeRings)) {
+ MY_INF("Book keeping got messed up during .rings search. .rings section does not appear aligned.");
+ MY_INF("ringsHostAddress0+sizeRings = 0x%016llx",(uint64_t)ringsHostAddress0+sizeRings);
+ MY_INF("nextRing = 0x%016llx",*(uint64_t*)nextRing);
+ MY_INF("Continuing...");
+ }
- return rcLoc;
+ return rcLoc;
}
@@ -188,7 +189,7 @@ int get_ring_layout_from_image2( const void *i_imageIn,
// Comments:
// - Appends an RS4 or WF ring block to the .rings section. It doesn't care
// what type of ring it is. The only data that might be updated in the ring
-// block is the back pointer which is shared between both types of rings.
+// block is the backItemPtr which is shared between both types of rings.
// - If ringName=NULL: Assumes fwd ptr already exists in .ipl_data or .data
// section. Back pointer in ring block is unchanged.
// - If ringName!=NULL: Adds fwd ptr to .ipl_data or .data section. Updates back
@@ -200,78 +201,81 @@ int get_ring_layout_from_image2( const void *i_imageIn,
// - overridable: Indicates if a ring can be overridden. It is ignored if
// ringName==NULL.
// - Assumes ring block is in BE format.
-int write_ring_block_to_image( void *io_image,
+int write_ring_block_to_image( void *io_image,
const char *i_ringName,
- DeltaRingLayout *i_ringBlock,
- const uint8_t i_idxVector,
- const uint8_t i_override,
- const uint8_t i_overridable,
- const uint32_t i_sizeImageMax)
+ DeltaRingLayout *i_ringBlock,
+ const uint8_t i_idxVector,
+ const uint8_t i_override,
+ const uint8_t i_overridable,
+ const uint32_t i_sizeImageMax)
{
- uint32_t rc=0;
- SbeXipItem tocItem;
- uint32_t offsetRingBlock=1; // Initialize to anything but zero.
- uint32_t sizeImage=0;
- uint64_t ringPoreAddress=0,backPtr=0,fwdPtrCheck;
+ uint32_t rc=0;
+ SbeXipItem tocItem;
+ uint32_t offsetRingBlock=1; // Initialize to anything but zero.
+ uint32_t sizeImage=0;
+ uint64_t ringPoreAddress=0,backPtr=0,fwdPtrCheck;
- SBE_XIP_ERROR_STRINGS(g_errorStrings);
+ SBE_XIP_ERROR_STRINGS(g_errorStrings);
- if (myRev64(i_ringBlock->entryOffset)%8) {
- MY_INF("Ring code section is not 8-byte aligned.");
- return IMGBUILD_ERR_MISALIGNED_RING_LAYOUT;
- }
+ if (myRev64(i_ringBlock->entryOffset)%8) {
+ MY_ERR("Ring code section is not 8-byte aligned.");
+ return IMGBUILD_ERR_MISALIGNED_RING_LAYOUT;
+ }
if (i_ringName) {
- // Obtain the back pointer to the .data item, i.e. the location of the ptr associated with the
- // ring/var name in the TOC.
- //
- rc = sbe_xip_find( io_image, i_ringName, &tocItem);
- if (rc) {
+ // Obtain the back pointer to the .data item, i.e. the location of the ptr associated with the
+ // ring/var name in the TOC.
+ //
+ rc = sbe_xip_find( io_image, i_ringName, &tocItem);
+ if (rc) {
MY_ERR("sbe_xip_find() failed w/rc=%i", rc);
- MY_ERR("Probable cause: Ring name (=%s) not found in image.", i_ringName);
- return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
- }
- i_ringBlock->backItemPtr = myRev64( tocItem.iv_address +
- i_idxVector*8*(1+i_overridable) +
- 8*i_override*i_overridable );
+ MY_ERR("Probable cause: Ring name (=%s) not found in image.", i_ringName);
+ return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
+ }
+ i_ringBlock->backItemPtr = myRev64( tocItem.iv_address +
+ i_idxVector*8*(1+i_overridable) +
+ 8*i_override*i_overridable );
}
- // Append ring block to .rings section.
- //
- rc = sbe_xip_append(io_image,
- SBE_XIP_SECTION_RINGS,
- (void*)i_ringBlock,
- myRev32(i_ringBlock->sizeOfThis),
- i_sizeImageMax,
- &offsetRingBlock);
+ // Append ring block to .rings section.
+ //
+ rc = sbe_xip_append(io_image,
+ SBE_XIP_SECTION_RINGS,
+ (void*)i_ringBlock,
+ myRev32(i_ringBlock->sizeOfThis),
+ i_sizeImageMax,
+ &offsetRingBlock);
if (rc) {
- MY_INF("sbe_xip_append() failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
+ MY_ERR("sbe_xip_append() failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
+ sbe_xip_image_size(io_image,&sizeImage);
+ MY_ERR("Input image size: %i\n", sizeImage);
+ MY_ERR("Max image size allowed: %i\n", i_sizeImageMax);
return IMGBUILD_ERR_APPEND;
}
- // ...get new image size and test if successful update.
- rc = sbe_xip_image_size( io_image, &sizeImage);
- MY_DBG("Updated image size (after append): %i",sizeImage);
+ // ...get new image size and test if successful update.
+ rc = sbe_xip_image_size( io_image, &sizeImage);
+ MY_DBG("Updated image size (after append): %i",sizeImage);
if (rc) {
- MY_INF("sbe_xip_image_size() of output image failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
+ MY_ERR("sbe_xip_image_size() of output image failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
return IMGBUILD_ERR_XIP_MISC;
}
rc = sbe_xip_validate( io_image, sizeImage);
if (rc) {
- MY_INF("sbe_xip_validate() of output image failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
+ MY_ERR("sbe_xip_validate() of output image failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
return IMGBUILD_ERR_XIP_MISC;
}
-
- // Update forward pointer associated with the ring/var name + any override offset.
- //
- // Convert the ring offset (wrt .rings address) to an PORE address
- rc = sbe_xip_section2pore(io_image, SBE_XIP_SECTION_RINGS, offsetRingBlock, &ringPoreAddress);
+
+ // Update forward pointer associated with the ring/var name + any override offset.
+ //
+ // Convert the ring offset (wrt .rings address) to an PORE address
+ rc = sbe_xip_section2pore(io_image, SBE_XIP_SECTION_RINGS, offsetRingBlock, &ringPoreAddress);
MY_DBG("fwdPtr=0x%016llx", ringPoreAddress);
if (rc) {
- MY_INF("sbe_xip_section2pore() failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
+ MY_ERR("sbe_xip_section2pore() failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
return IMGBUILD_ERR_XIP_MISC;
}
- // Now, update the forward pointer.
+ // Now, update the forward pointer.
//
// First, retrieve the ring block's backPtr which tells us where the fwd ptr
// is located.
@@ -281,40 +285,40 @@ int write_ring_block_to_image( void *io_image,
// backItemPtr in the input ring block already has this from the ref image,
// and it shouldn't have changed after having been ported over to an
// IPL/Seeprom image.
- backPtr = myRev64(i_ringBlock->backItemPtr);
- MY_DBG("backPtr = 0x%016llx", backPtr);
+ backPtr = myRev64(i_ringBlock->backItemPtr);
+ MY_DBG("backPtr = 0x%016llx", backPtr);
// Second, put the ring's Pore addr into the location pointed to by the back ptr.
- rc = sbe_xip_write_uint64( io_image,
- backPtr,
- ringPoreAddress);
+ rc = sbe_xip_write_uint64( io_image,
+ backPtr,
+ ringPoreAddress);
// Third, let's read it back to make sure we're OK a little further down.
- rc = rc+sbe_xip_read_uint64(io_image,
- backPtr,
- &fwdPtrCheck);
- if (rc) {
- MY_INF("sbe_xip_[write,read]_uint64() failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
- return IMGBUILD_ERR_XIP_MISC;
- }
+ rc = rc+sbe_xip_read_uint64(io_image,
+ backPtr,
+ &fwdPtrCheck);
+ if (rc) {
+ MY_ERR("sbe_xip_[write,read]_uint64() failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
+ return IMGBUILD_ERR_XIP_MISC;
+ }
// Check for pointer mess.
- if (fwdPtrCheck!=ringPoreAddress || backPtr!=myRev64(i_ringBlock->backItemPtr)) {
- MY_INF("Forward or backward pointer mess. Check code.");
- MY_INF("fwdPtr =0x%016llx",ringPoreAddress);
- MY_INF("fwdPtrCheck =0x%016llx",fwdPtrCheck);
- MY_INF("layout bckPtr=0x%016llx",myRev64(i_ringBlock->backItemPtr));
- MY_INF("backPtr =0x%016llx",backPtr);
- return IMGBUILD_ERR_FWD_BACK_PTR_MESS;
- }
- // ...test if successful update.
+ if (fwdPtrCheck!=ringPoreAddress || backPtr!=myRev64(i_ringBlock->backItemPtr)) {
+ MY_ERR("Forward or backward pointer mess. Check code.");
+ MY_ERR("fwdPtr =0x%016llx",ringPoreAddress);
+ MY_ERR("fwdPtrCheck =0x%016llx",fwdPtrCheck);
+ MY_ERR("layout bckPtr=0x%016llx",myRev64(i_ringBlock->backItemPtr));
+ MY_ERR("backPtr =0x%016llx",backPtr);
+ return IMGBUILD_ERR_FWD_BACK_PTR_MESS;
+ }
+ // ...test if successful update.
rc = sbe_xip_validate( io_image, sizeImage);
if (rc) {
- MY_INF("sbe_xip_validate() of output image failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
- MY_INF("Probable cause: sbe_xip_write_uint64() updated at the wrong address (=0x%016llx)",
- myRev64(i_ringBlock->backItemPtr));
+ MY_ERR("sbe_xip_validate() of output image failed: %s", SBE_XIP_ERROR_STRING(g_errorStrings, rc));
+ MY_ERR("Probable cause: sbe_xip_write_uint64() updated at the wrong address (=0x%016llx)",
+ myRev64(i_ringBlock->backItemPtr));
return IMGBUILD_ERR_XIP_MISC;
}
- return IMGBUILD_SUCCESS;
+ return IMGBUILD_SUCCESS;
}
@@ -330,33 +334,124 @@ uint64_t calc_ring_layout_entry_offset(
if (i_typeRingLayout==0) {
// RS4 ring block.
ringBlock.entryOffset = (uint64_t)(
- sizeof(ringBlock.entryOffset) +
- sizeof(ringBlock.backItemPtr) +
- sizeof(ringBlock.sizeOfThis) +
- sizeof(ringBlock.sizeOfMeta) +
- sizeof(ringBlock.ddLevel) +
- sizeof(ringBlock.sysPhase) +
- sizeof(ringBlock.override) +
- sizeof(ringBlock.reserved1) +
- sizeof(ringBlock.reserved2) +
- myByteAlign(8, i_sizeMetaData) ); // 8-byte align RS4 launch.
- }
- else
- if (i_typeRingLayout==1) {
- // Wiggle-flip ring block.
+ sizeof(ringBlock.entryOffset) +
+ sizeof(ringBlock.backItemPtr) +
+ sizeof(ringBlock.sizeOfThis) +
+ sizeof(ringBlock.sizeOfMeta) +
+ sizeof(ringBlock.ddLevel) +
+ sizeof(ringBlock.sysPhase) +
+ sizeof(ringBlock.override) +
+ sizeof(ringBlock.reserved1) +
+ sizeof(ringBlock.reserved2) +
+ myByteAlign(8, i_sizeMetaData) ); // 8-byte align RS4 launch.
+ }
+ else
+ if (i_typeRingLayout==1) {
+ // Wiggle-flip ring block.
ringBlock.entryOffset = (uint64_t)(
- sizeof(ringBlock.entryOffset) +
- sizeof(ringBlock.backItemPtr) +
- sizeof(ringBlock.sizeOfThis) +
- sizeof(ringBlock.sizeOfMeta) +
- myByteAlign(8, i_sizeMetaData) ); // 8-byte align WF prg.
- }
- else
- return MAX_UINT64_T;
-
+ sizeof(ringBlock.entryOffset) +
+ sizeof(ringBlock.backItemPtr) +
+ sizeof(ringBlock.sizeOfThis) +
+ sizeof(ringBlock.sizeOfMeta) +
+ myByteAlign(8, i_sizeMetaData) ); // 8-byte align WF prg.
+ }
+ else
+ return MAX_UINT64_T;
+
return ringBlock.entryOffset;
}
+// Function: over_write_ring_data_in_image()
+// Comments:
+// - Overwrites RS4 or WF ring block data in the .rings section. It doesn't care
+// what type of ring it is. The only data that might be updated in the ring
+// block is the sizeOfThis which is shared between both types of rings.
+// - If ringName=NULL: ?
+// - If ringName!=NULL: ?
+// - ringData: The actual RS4 ring data, incl container, or the WF program.
+// - sizeRingData: Byte size of ring data. This includes RS4 launch in case of RS4.
+// - idxVector: Contains the index number of a vector array. This is pretty much
+// limited for ex_ chiplet IDs. It is ignored if ringName==NULL.
+// - override: Indicates if the ring is an override ring. It is ignored if
+// ringName==NULL.
+// - overridable: Indicates if a ring can be overridden. It is ignored if
+// ringName==NULL.
+int over_write_ring_data_in_image( void *io_image,
+ const char *i_ringName,
+ const void *i_ringData, // WF or RS4
+ const uint32_t i_sizeRingData, // Byte size
+ const uint8_t i_idxVector,
+ const uint8_t i_override,
+ const uint8_t i_overridable )
+{
+ uint32_t rc=0;
+ SbeXipItem tocItem;
+ uint32_t sizeImage=0;
+ void *hostVectorBase, *hostVectorThis;
+ DeltaRingLayout *hostRingBlock;
+ void *hostRingData;
+
+ // Test if valid image to start with since we're going to mess with it w/o using
+ // sbe_xip functions.
+ sbe_xip_image_size( io_image, &sizeImage);
+ rc = sbe_xip_validate( io_image, sizeImage);
+ if (rc) {
+ MY_ERR("sbe_xip_validate() failed w/rc=%i\n", rc);
+ return IMGBUILD_ERR_XIP_MISC;
+ }
+
+ // Calculate the host location of the ring.
+ //
+ rc = sbe_xip_find( io_image, i_ringName, &tocItem);
+ if (rc) {
+ MY_ERR("sbe_xip_find() failed w/rc=%i", rc);
+ MY_ERR("Probable cause: Ring name (=%s) not found in image.", i_ringName);
+ return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
+ }
+ sbe_xip_pore2host( io_image, tocItem.iv_address, &hostVectorBase);
+ hostVectorThis = (void*) ( (uint64_t)hostVectorBase +
+ i_idxVector*8*(1+i_overridable) +
+ 8*i_override*i_overridable );
+ hostRingBlock = (DeltaRingLayout*)(*(uintptr_t*)hostVectorThis);
+ hostRingData = (void*)( (uint64_t)hostRingBlock + hostRingBlock->entryOffset );
+
+ // Over write ringData onto existing ring data content in image.
+ //
+ memcpy(hostRingData, i_ringData, i_sizeRingData);
+
+ // Update size of new ring block.
+ //
+ hostRingBlock->sizeOfThis = hostRingBlock->entryOffset + i_sizeRingData;
+
+ // Test if successful update.
+ rc = sbe_xip_validate( io_image, sizeImage);
+ if (rc) {
+ MY_ERR("sbe_xip_validate() failed w/rc=%i\n", rc);
+ MY_ERR("We really screwed up the image here. This is a coding error. Here's some data:\n");
+ MY_ERR("io_image = 0x%016llx\n",(uint64_t)io_image);
+ MY_ERR("hostVectorBase = 0x%016llx\n",(uint64_t)hostVectorBase);
+ MY_ERR("hostVectorThis = 0x%016llx\n",(uint64_t)hostVectorThis);
+ MY_ERR("hostRingBlock = 0x%016llx\n",(uint64_t)hostRingBlock);
+ MY_ERR("hostRingData = 0x%016llx\n",(uint64_t)hostRingData);
+ return IMGBUILD_ERR_XIP_MISC;
+ }
+
+ MY_DBG("Dumping ring layout of over-writen ring:");
+ MY_DBG(" entryOffset = 0x%016llx",myRev64(hostRingBlock->entryOffset));
+ MY_DBG(" backItemPtr = 0x%016llx",myRev64(hostRingBlock->backItemPtr));
+ MY_DBG(" sizeOfThis = %i",myRev32(hostRingBlock->sizeOfThis));
+ MY_DBG(" sizeOfMeta = %i",myRev32(hostRingBlock->sizeOfMeta));
+ MY_DBG(" ddLevel = %i",myRev32(hostRingBlock->ddLevel));
+ MY_DBG(" sysPhase = %i",hostRingBlock->sysPhase);
+ MY_DBG(" override = %i",hostRingBlock->override);
+ MY_DBG(" reserved1+2 = %i",hostRingBlock->reserved1|hostRingBlock->reserved2);
+
+
+ return IMGBUILD_SUCCESS;
+}
+
+
+
}
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_ring_identification.c b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_ring_identification.c
index 3923c5d59..7a061f12d 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_ring_identification.c
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_ring_identification.c
@@ -20,7 +20,7 @@
/* Origin: 30 */
/* */
/* IBM_PROLOG_END_TAG */
-// $Id: p8_ring_identification.c,v 1.14 2012/12/19 12:49:27 cmolsen Exp $
+// $Id: p8_ring_identification.c,v 1.18 2013/02/12 23:30:07 cmolsen Exp $
/*------------------------------------------------------------------------------*/
/* *! (C) Copyright International Business Machines Corp. 2012 */
/* *! All Rights Reserved -- Property of IBM */
@@ -34,121 +34,119 @@
#include <p8_ring_identification.H>
const RingIdList RING_ID_LIST_PG[] = {
- /* ringName ringId chipIdMin chipIdMax ringNameImg mvpdKeyword */
- { "ab_gptr_ab", 0xA0, 0x08, 0x08, "ab_gptr_ab_ring", VPD_KEYWORD_PDG },
- { "ab_gptr_ioa", 0xA1, 0x08, 0x08, "ab_gptr_ioa_ring", VPD_KEYWORD_PDG },
- { "ab_gptr_perv", 0xA2, 0x08, 0x08, "ab_gptr_perv_ring", VPD_KEYWORD_PDG },
- { "ab_gptr_pll", 0xA3, 0x08, 0x08, "ab_gptr_pll_ring", VPD_KEYWORD_PDG },
- { "ab_time", 0xA4, 0x08, 0x08, "ab_time_ring", VPD_KEYWORD_PDG },
- { "ex_gptr_core", 0xA5, 0xFF, 0xFF, "ex_gptr_core_ring", VPD_KEYWORD_PDG }, //Chip specific
- { "ex_gptr_dpll", 0xA6, 0xFF, 0xFF, "ex_gptr_dpll_ring", VPD_KEYWORD_PDG }, //Chip specific
- { "ex_gptr_l2", 0xA7, 0xFF, 0xFF, "ex_gptr_l2_ring", VPD_KEYWORD_PDG }, //Chip specific
- { "ex_gptr_l3", 0xA8, 0xFF, 0xFF, "ex_gptr_l3_ring", VPD_KEYWORD_PDG }, //Chip specific
- { "ex_gptr_l3refr", 0xA9, 0xFF, 0xFF, "ex_gptr_l3refr_ring", VPD_KEYWORD_PDG }, //Chip specific
- { "ex_gptr_perv", 0xAA, 0xFF, 0xFF, "ex_gptr_perv_ring", VPD_KEYWORD_PDG }, //Chip specific
- { "ex_time_core", 0xAB, 0x10, 0x1F, "ex_time_core_ring", VPD_KEYWORD_PDG }, //Chiplet specfc
- { "ex_time_eco", 0xAC, 0x10, 0x1F, "ex_time_eco_ring", VPD_KEYWORD_PDG }, //Chiplet specfc
- { "pb_gptr_dmipll", 0xAD, 0x02, 0x02, "pb_gptr_dmipll_ring", VPD_KEYWORD_PDG },
- { "pb_gptr_mcr", 0xAE, 0x02, 0x02, "pb_gptr_mcr_ring", VPD_KEYWORD_PDG },
- { "pb_gptr_nest", 0xAF, 0x02, 0x02, "pb_gptr_nest_ring", VPD_KEYWORD_PDG },
- { "pb_gptr_nx", 0xB0, 0x02, 0x02, "pb_gptr_nx_ring", VPD_KEYWORD_PDG },
- { "pb_gptr_pcis", 0xB1, 0x02, 0x02, "pb_gptr_pcis_ring", VPD_KEYWORD_PDG },
- { "pb_gptr_perv", 0xB2, 0x02, 0x02, "pb_gptr_perv_ring", VPD_KEYWORD_PDG },
- { "pb_time", 0xB3, 0x02, 0x02, "pb_time_ring", VPD_KEYWORD_PDG },
- { "pb_time_mcr", 0xB4, 0x02, 0x02, "pb_time_mcr_ring", VPD_KEYWORD_PDG },
- { "pb_time_nx", 0xB5, 0x02, 0x02, "pb_time_nx_ring", VPD_KEYWORD_PDG },
- { "pci_gptr_iopci", 0xB6, 0x09, 0x09, "pci_gptr_iopci_ring", VPD_KEYWORD_PDG },
- { "pci_gptr_pbf", 0xB7, 0x09, 0x09, "pci_gptr_pbf_ring", VPD_KEYWORD_PDG },
- { "pci_gptr_pci0", 0xB8, 0x09, 0x09, "pci_gptr_pci0_ring", VPD_KEYWORD_PDG },
- { "pci_gptr_pci1", 0xB9, 0x09, 0x09, "pci_gptr_pci1_ring", VPD_KEYWORD_PDG },
- { "pci_gptr_pci2", 0xBA, 0x09, 0x09, "pci_gptr_pci2_ring", VPD_KEYWORD_PDG },
- { "pci_gptr_perv", 0xBB, 0x09, 0x09, "pci_gptr_perv_ring", VPD_KEYWORD_PDG },
- { "pci_gptr_pll", 0xBC, 0x09, 0x09, "pci_gptr_pll_ring", VPD_KEYWORD_PDG },
- { "pci_time", 0xBD, 0x09, 0x09, "pci_time_ring", VPD_KEYWORD_PDG },
- { "perv_gptr_net", 0xBE, 0x00, 0x00, "perv_gptr_net_ring", VPD_KEYWORD_PDG },
- { "perv_gptr_occ", 0xBF, 0x00, 0x00, "perv_gptr_occ_ring", VPD_KEYWORD_PDG },
- { "perv_gptr_perv", 0xC0, 0x00, 0x00, "perv_gptr_perv_ring", VPD_KEYWORD_PDG },
- { "perv_gptr_pib", 0xC1, 0x00, 0x00, "perv_gptr_pib_ring", VPD_KEYWORD_PDG },
- { "perv_gptr_pll", 0xC2, 0x00, 0x00, "perv_gptr_pll_ring", VPD_KEYWORD_PDG },
- { "perv_time", 0xC3, 0x00, 0x00, "perv_time_ring", VPD_KEYWORD_PDG },
- { "xb_gptr_iox", 0xC4, 0x04, 0x04, "xb_gptr_iox_ring", VPD_KEYWORD_PDG },
- { "xb_gptr_iopci", 0xC5, 0x04, 0x04, "xb_gptr_iopci_ring", VPD_KEYWORD_PDG },
- { "xb_gptr_pben", 0xC6, 0x04, 0x04, "xb_gptr_pben_ring", VPD_KEYWORD_PDG },
- { "xb_gptr_perv", 0xC7, 0x04, 0x04, "xb_gptr_perv_ring", VPD_KEYWORD_PDG },
- { "xb_time", 0xC8, 0x04, 0x04, "xb_time_ring", VPD_KEYWORD_PDG },
+ /* ringName ringId chipletId ringNameImg mvpdKeyword wc */
+ /* min max */
+ {"ab_gptr_ab", 0xA0, 0x08, 0x08, "ab_gptr_ab_ring", VPD_KEYWORD_PDG, 0},
+ {"ab_gptr_ioa", 0xA1, 0x08, 0x08, "ab_gptr_ioa_ring", VPD_KEYWORD_PDG, 0},
+ {"ab_gptr_perv", 0xA2, 0x08, 0x08, "ab_gptr_perv_ring", VPD_KEYWORD_PDG, 0},
+ {"ab_gptr_pll", 0xA3, 0x08, 0x08, "ab_gptr_pll_ring", VPD_KEYWORD_PDG, 0},
+ {"ab_time", 0xA4, 0x08, 0x08, "ab_time_ring", VPD_KEYWORD_PDG, 0},
+ {"ex_gptr_core", 0xA5, 0xFF, 0xFF, "ex_gptr_core_ring", VPD_KEYWORD_PDG, 0}, //Chip specific
+ {"ex_gptr_dpll", 0xA6, 0xFF, 0xFF, "ex_gptr_dpll_ring", VPD_KEYWORD_PDG, 0}, //Chip specific
+ {"ex_gptr_l2", 0xA7, 0xFF, 0xFF, "ex_gptr_l2_ring", VPD_KEYWORD_PDG, 0}, //Chip specific
+ {"ex_gptr_l3", 0xA8, 0xFF, 0xFF, "ex_gptr_l3_ring", VPD_KEYWORD_PDG, 0}, //Chip specific
+ {"ex_gptr_l3refr", 0xA9, 0xFF, 0xFF, "ex_gptr_l3refr_ring", VPD_KEYWORD_PDG, 0}, //Chip specific
+ {"ex_gptr_perv", 0xAA, 0xFF, 0xFF, "ex_gptr_perv_ring", VPD_KEYWORD_PDG, 0}, //Chip specific
+ {"ex_time_core", 0xAB, 0x10, 0x1F, "ex_time_core_ring", VPD_KEYWORD_PDG, 0}, //Chiplet specfc
+ {"ex_time_eco", 0xAC, 0x10, 0x1F, "ex_time_eco_ring", VPD_KEYWORD_PDG, 0}, //Chiplet specfc
+ {"pb_gptr_dmipll", 0xAD, 0x02, 0x02, "pb_gptr_dmipll_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_gptr_mcr", 0xAE, 0x02, 0x02, "pb_gptr_mcr_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_gptr_nest", 0xAF, 0x02, 0x02, "pb_gptr_nest_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_gptr_nx", 0xB0, 0x02, 0x02, "pb_gptr_nx_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_gptr_pcis", 0xB1, 0x02, 0x02, "pb_gptr_pcis_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_gptr_perv", 0xB2, 0x02, 0x02, "pb_gptr_perv_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_time", 0xB3, 0x02, 0x02, "pb_time_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_time_mcr", 0xB4, 0x02, 0x02, "pb_time_mcr_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_time_nx", 0xB5, 0x02, 0x02, "pb_time_nx_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_gptr_iopci", 0xB6, 0x09, 0x09, "pci_gptr_iopci_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_gptr_pbf", 0xB7, 0x09, 0x09, "pci_gptr_pbf_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_gptr_pci0", 0xB8, 0x09, 0x09, "pci_gptr_pci0_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_gptr_pci1", 0xB9, 0x09, 0x09, "pci_gptr_pci1_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_gptr_pci2", 0xBA, 0x09, 0x09, "pci_gptr_pci2_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_gptr_perv", 0xBB, 0x09, 0x09, "pci_gptr_perv_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_gptr_pll", 0xBC, 0x09, 0x09, "pci_gptr_pll_ring", VPD_KEYWORD_PDG, 0},
+ {"pci_time", 0xBD, 0x09, 0x09, "pci_time_ring", VPD_KEYWORD_PDG, 0},
+ {"perv_gptr_net", 0xBE, 0x00, 0x00, "perv_gptr_net_ring", VPD_KEYWORD_PDG, 0},
+ {"perv_gptr_occ", 0xBF, 0x00, 0x00, "perv_gptr_occ_ring", VPD_KEYWORD_PDG, 0},
+ {"perv_gptr_perv", 0xC0, 0x00, 0x00, "perv_gptr_perv_ring", VPD_KEYWORD_PDG, 0},
+ {"perv_gptr_pib", 0xC1, 0x00, 0x00, "perv_gptr_pib_ring", VPD_KEYWORD_PDG, 0},
+ {"perv_gptr_pll", 0xC2, 0x00, 0x00, "perv_gptr_pll_ring", VPD_KEYWORD_PDG, 0},
+ {"perv_time", 0xC3, 0x00, 0x00, "perv_time_ring", VPD_KEYWORD_PDG, 0},
+ {"xb_gptr_iopci", 0xC4, 0x04, 0x04, "xb_gptr_iopci_ring", VPD_KEYWORD_PDG, 0},
+ {"xb_gptr_iox", 0xC5, 0x04, 0x04, "xb_gptr_iox_ring", VPD_KEYWORD_PDG, 0},
+ {"xb_gptr_pben", 0xC6, 0x04, 0x04, "xb_gptr_pben_ring", VPD_KEYWORD_PDG, 0},
+ {"xb_gptr_perv", 0xC7, 0x04, 0x04, "xb_gptr_perv_ring", VPD_KEYWORD_PDG, 0},
+ {"xb_time", 0xC8, 0x04, 0x04, "xb_time_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_gptr_mcl", 0xC9, 0x02, 0x02, "pb_gptr_mcl_ring", VPD_KEYWORD_PDG, 0},
+ {"pb_time_mcl", 0xCA, 0x02, 0x02, "pb_time_mcl_ring", VPD_KEYWORD_PDG, 0},
};
const RingIdList RING_ID_LIST_PR[] = {
/* ringName ringId chipIdMin chipIdMax ringNameImg mvpdKeyword */
- { "ab_repr", 0xE0, 0x08, 0x08, "ab_repr_ring", VPD_KEYWORD_PDR },
- { "ex_repr_core", 0xE1, 0x10, 0x1F, "ex_repr_core_ring", VPD_KEYWORD_PDR },
- { "ex_repr_eco", 0xE2, 0x10, 0x1F, "ex_repr_eco_ring", VPD_KEYWORD_PDR },
- { "pb_repr", 0xE3, 0x02, 0x02, "pb_repr_ring", VPD_KEYWORD_PDR },
- { "pb_repr_mcr", 0xE4, 0x02, 0x02, "pb_repr_mcr_ring", VPD_KEYWORD_PDR },
- { "pb_repr_nx", 0xE5, 0x02, 0x02, "pb_repr_nx_ring", VPD_KEYWORD_PDR },
- { "pci_repr", 0xE6, 0x09, 0x09, "pci_repr_ring", VPD_KEYWORD_PDR },
- { "perv_repr", 0xE7, 0x00, 0x00, "perv_repr_ring", VPD_KEYWORD_PDR },
- { "perv_repr_net", 0xE8, 0x00, 0x00, "perv_repr_net_ring", VPD_KEYWORD_PDR },
- { "perv_repr_pib", 0xE9, 0x00, 0x00, "perv_repr_pib_ring", VPD_KEYWORD_PDR },
- { "xb_repr", 0xEA, 0x04, 0x04, "xb_repr_ring", VPD_KEYWORD_PDR },
+ {"ab_repr", 0xE0, 0x08, 0x08, "ab_repr_ring", VPD_KEYWORD_PDR, 0},
+ {"ex_repr_core", 0xE1, 0x10, 0x1F, "ex_repr_core_ring", VPD_KEYWORD_PDR, 1},
+ {"ex_repr_eco", 0xE2, 0x10, 0x1F, "ex_repr_eco_ring", VPD_KEYWORD_PDR, 1},
+ {"pb_repr", 0xE3, 0x02, 0x02, "pb_repr_ring", VPD_KEYWORD_PDR, 0},
+ {"pb_repr_mcr", 0xE4, 0x02, 0x02, "pb_repr_mcr_ring", VPD_KEYWORD_PDR, 0},
+ {"pb_repr_nx", 0xE5, 0x02, 0x02, "pb_repr_nx_ring", VPD_KEYWORD_PDR, 0},
+ {"pci_repr", 0xE6, 0x09, 0x09, "pci_repr_ring", VPD_KEYWORD_PDR, 0},
+ {"perv_repr", 0xE7, 0x00, 0x00, "perv_repr_ring", VPD_KEYWORD_PDR, 0},
+ {"perv_repr_net", 0xE8, 0x00, 0x00, "perv_repr_net_ring", VPD_KEYWORD_PDR, 0},
+ {"perv_repr_pib", 0xE9, 0x00, 0x00, "perv_repr_pib_ring", VPD_KEYWORD_PDR, 0},
+ {"xb_repr", 0xEA, 0x04, 0x04, "xb_repr_ring", VPD_KEYWORD_PDR, 0},
+ {"pb_repr_mcl", 0xEB, 0x02, 0x02, "pb_repr_mcl_ring", VPD_KEYWORD_PDR, 0},
};
const uint32_t RING_ID_LIST_PG_SIZE = sizeof(RING_ID_LIST_PG)/sizeof(RING_ID_LIST_PG[0]);
const uint32_t RING_ID_LIST_PR_SIZE = sizeof(RING_ID_LIST_PR)/sizeof(RING_ID_LIST_PR[0]);
-// The following defines are probably safe to decommision at this point.
-const RingIdList RING_ID_LIST[] = {
- /* ringName ringId chipIdMin chipIdMax ringNameImg mvpdKeyword */
- { "ab_repr", 0xE0, 0x08, 0x08, "ab_repr_ring", VPD_KEYWORD_PDR },
- { "ex_repr_core", 0xE1, 0x10, 0x1F, "ex_repr_core_ring", VPD_KEYWORD_PDR },
- { "ex_repr_eco", 0xE2, 0x10, 0x1F, "ex_repr_eco_ring", VPD_KEYWORD_PDR },
- { "pb_repr", 0xE3, 0x02, 0x02, "pb_repr_ring", VPD_KEYWORD_PDR },
- { "pb_repr_mcr", 0xE4, 0x02, 0x02, "pb_repr_mcr_ring", VPD_KEYWORD_PDR },
- { "pb_repr_nx", 0xE5, 0x02, 0x02, "pb_repr_nx_ring", VPD_KEYWORD_PDR },
- { "pci_repr", 0xE6, 0x09, 0x09, "pci_repr_ring", VPD_KEYWORD_PDR },
- { "perv_repr", 0xE7, 0x00, 0x00, "perv_repr_ring", VPD_KEYWORD_PDR },
- { "perv_repr_net", 0xE8, 0x00, 0x00, "perv_repr_net_ring", VPD_KEYWORD_PDR },
- { "perv_repr_pib", 0xE9, 0x00, 0x00, "perv_repr_pib_ring", VPD_KEYWORD_PDR },
- { "xb_repr", 0xEA, 0x04, 0x04, "xb_repr_ring", VPD_KEYWORD_PDR },
-};
-const uint32_t RING_ID_LIST_SIZE = sizeof(RING_ID_LIST)/sizeof(RING_ID_LIST[0]);
-
// get_vpd_ring_list_entry() retrieves the MVPD list entry based on either a ringName
// or a ringId. If both are supplied, only the ringName is used. If ringName==NULL,
// then the ringId is used. A pointer to the RingIdList is returned.
int get_vpd_ring_list_entry(const char *i_ringName,
- const uint8_t i_ringId,
- RingIdList **i_ringIdList)
+ const uint8_t i_ringId,
+ RingIdList **i_ringIdList)
{
- int rc=0, NOT_FOUND=0, FOUND=1;
+ int rc=0, NOT_FOUND=1, FOUND=0;
uint8_t iVpdType;
uint8_t iRing;
- RingIdList *ring_id_list=NULL;
- uint8_t ring_id_list_size;
+ RingIdList *ring_id_list=NULL;
+ uint8_t ring_id_list_size;
- rc = NOT_FOUND;
- for (iVpdType=0; iVpdType<NUM_OF_VPD_TYPES; iVpdType++) {
+ rc = NOT_FOUND;
+ for (iVpdType=0; iVpdType<NUM_OF_VPD_TYPES; iVpdType++) {
if (iVpdType==0) {
- ring_id_list = (RingIdList*)RING_ID_LIST_PG;
- ring_id_list_size = (uint32_t)RING_ID_LIST_PG_SIZE;
- }
- else {
- ring_id_list = (RingIdList*)RING_ID_LIST_PR;
- ring_id_list_size = (uint32_t)RING_ID_LIST_PR_SIZE;
- }
- // Search the MVPD reference lists.
- if (i_ringName) {
- for (iRing=0; iRing<ring_id_list_size; iRing++) {
- if (strcmp((ring_id_list+iRing)->ringNameImg,i_ringName)==0) {
- *i_ringIdList = ring_id_list+iRing;
- return FOUND;
- }
- }
- }
- // Since ringName was not supplied, search for ringId.
- // 2012-11-12: TBD.
+ ring_id_list = (RingIdList*)RING_ID_LIST_PG;
+ ring_id_list_size = (uint32_t)RING_ID_LIST_PG_SIZE;
+ }
+ else {
+ ring_id_list = (RingIdList*)RING_ID_LIST_PR;
+ ring_id_list_size = (uint32_t)RING_ID_LIST_PR_SIZE;
+ }
+ // Search the MVPD reference lists for either a:
+ // - ringName match with or w/o _image in the name, or
+ // - ringId match.
+ if (i_ringName) {
+ // Search for ringName match.
+ for (iRing=0; iRing<ring_id_list_size; iRing++) {
+ if ( strcmp((ring_id_list+iRing)->ringName, i_ringName)==0 ||
+ strcmp((ring_id_list+iRing)->ringNameImg,i_ringName)==0 ) {
+ *i_ringIdList = ring_id_list+iRing;
+ return FOUND;
+ }
+ }
+ }
+ else {
+ // Search for ringId match (since ringName was not supplied).
+ for (iRing=0; iRing<ring_id_list_size; iRing++) {
+ if ((ring_id_list+iRing)->ringId==i_ringId) {
+ *i_ringIdList = ring_id_list+iRing;
+ return FOUND;
+ }
+ }
+ }
- }
- return rc;
+ }
+ return rc;
}
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pgas.h b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pgas.h
index ad0a99d78..f17f5395c 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pgas.h
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pgas.h
@@ -5,7 +5,7 @@
/* */
/* IBM CONFIDENTIAL */
/* */
-/* COPYRIGHT International Business Machines Corp. 2012 */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
/* */
/* p1 */
/* */
@@ -25,7 +25,7 @@
#define __PGAS__
-// $Id: pgas.h,v 1.17 2012/08/15 19:24:49 bcbrock Exp $
+// $Id: pgas.h,v 1.19 2013/01/09 23:01:38 bcbrock Exp $
// ** WARNING : This file is maintained as part of the OCC firmware. Do **
// ** not edit this file in the PMX area, the hardware procedure area, **
@@ -339,6 +339,14 @@
.endif
.endm
+ // A register pair required to be A0, A1 in any order
+ .macro ..axay, reg1, reg2, err="Expecting A0, A1 in either order"
+ .if !((((\reg1) == A0) && ((\reg2) == A1)) || \
+ (((\reg1) == A1) && ((\reg2) == A0)))
+ .error "\err"
+ .endif
+ .endm
+
// A register pair required to be the same register
.macro ..same, dest, src
@@ -750,6 +758,60 @@
.endm
+ //////////////////////////////////////////////////////////////////////
+ // EXTRPRC - Extract and right-justify the PIB/PCB return code
+ // TPRCB[N]Z - Test PIB return code and branch if [not] zero
+ // TPRCBGT - Test PIB return code and branch if greater-than
+ // TPRCBLE - Test PIB return code and branch if less-than or equal
+ //////////////////////////////////////////////////////////////////////
+ //
+ // To support cases where PORE code expects or must explicitly handle
+ // non-0 PIB return codes, the PIB return code and parity indication
+ // are stored in bits 32 (parity) and 33-35 (return code) of the IFR.
+ // These macros extract the four PIB/PCB status bits from the IFR and
+ // right-justifies them into the data register provided. For EXTRPRC
+ // that is the total function of the macro. The TPRCB[N]Z macros
+ // provide a simple non-destructive test and branch for zero (success)
+ // and non-zero (potential problem) codes after the extraction.
+ //
+ // In complex error handling scenarios one would typically compare the
+ // PIB return code against an upper-bound, e.g., the offline response
+ // (0x2), and then take further action. If the parity error bit is set
+ // then this would produce an aggregate "return code" higher than any
+ // that one would typically want to ignore. The TPRCBGT/TPRCBLE macros
+ // provide this function; however the test destroys the extracted
+ // return code so that if further analysis is required the code will
+ // need to be a extracted again.
+ //////////////////////////////////////////////////////////////////////
+
+ .macro extrprc, dest:req
+ ..data (\dest)
+ mr (\dest), IFR
+ extrdi (\dest), (\dest), 4, 32
+ .endm
+
+ .macro tprcbz, dest:req, target:req
+ extrprc (\dest)
+ braz (\dest), (\target)
+ .endm
+
+ .macro tprcbnz, dest:req, target:req
+ extrprc (\dest)
+ branz (\dest), (\target)
+ .endm
+
+ .macro tprcbgt, dest:req, target:req, bound:req
+ extrprc (\dest)
+ subs (\dest), (\dest), (\bound)
+ tfbugt (\dest), (\target)
+ .endm
+
+ .macro tprcble, dest:req, target:req, bound:req
+ extrprc (\dest)
+ subs (\dest), (\dest), (\bound)
+ tfbule (\dest), (\target)
+ .endm
+
//////////////////////////////////////////////////////////////////////
// LPCS - Load Pervasive Chiplet from Scom address
//////////////////////////////////////////////////////////////////////
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_bitmanip.H b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_bitmanip.H
index b9ba60424..337d9e63e 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_bitmanip.H
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_bitmanip.H
@@ -1,26 +1,25 @@
-/* IBM_PROLOG_BEGIN_TAG
- * This is an automatically generated prolog.
- *
- * $Source: src/usr/hwpf/hwp/build_winkle_images/proc_slw_build/pore_bitmanip.H $
- *
- * IBM CONFIDENTIAL
- *
- * COPYRIGHT International Business Machines Corp. 2012
- *
- * p1
- *
- * Object Code Only (OCO) source materials
- * Licensed Internal Code Source Materials
- * IBM HostBoot Licensed Internal Code
- *
- * The source code for this program is not published or other-
- * wise divested of its trade secrets, irrespective of what has
- * been deposited with the U.S. Copyright Office.
- *
- * Origin: 30
- *
- * IBM_PROLOG_END_TAG
- */
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_bitmanip.H $ */
+/* */
+/* IBM CONFIDENTIAL */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
+/* */
+/* p1 */
+/* */
+/* Object Code Only (OCO) source materials */
+/* Licensed Internal Code Source Materials */
+/* IBM HostBoot Licensed Internal Code */
+/* */
+/* The source code for this program is not published or otherwise */
+/* divested of its trade secrets, irrespective of what has been */
+/* deposited with the U.S. Copyright Office. */
+/* */
+/* Origin: 30 */
+/* */
+/* IBM_PROLOG_END_TAG */
#ifndef __PORE_BITMANIP_H
#define __PORE_BITMANIP_H
@@ -354,6 +353,34 @@
.endm
+/// Destructively insert a right-justified immediate value into a bit field
+/// read out from a scom address
+///
+/// \param[out] dest The destination Data register (D0/D1) to be modified.
+///
+/// \param[in] b The bit position (64-bit, big-endian) where the bit field
+/// begins.
+///
+/// \param[in] n The number of contiguous bits beginning at bit \a b to
+/// modify
+///
+/// \param[in] address The scom address to read
+///
+/// \param[in] prv The pervasive base register containing the correct base for
+/// the address
+///
+/// The execution of this macro computes:
+///
+/// - dest <- (dest & ~BITS(b, n)) | ((imm & BITS(64 - n, n)) << (64 - n - b))
+
+ .macro insertbitsscom, dest:req, address:req, prv:req, b:req, n:req, imm:req
+ ..checkbits (\b), (\n)
+ ldandi (\dest), (\address), (\prv), ~BITS((\b), (\n))
+ ori (\dest), (\dest), \
+ (((\imm) & BITS(64 - (\n), (\n))) << ((64 - (\n) - (\b))))
+ .endm
+
+
/// Poll for a bit to be set in a SCOM register with timeout
///
/// \param[in] dest A Data register (D0/D1) to use for the polling
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline.h b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline.h
index 50dd6b7f9..23a5863ba 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline.h
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline.h
@@ -5,7 +5,7 @@
/* */
/* IBM CONFIDENTIAL */
/* */
-/* COPYRIGHT International Business Machines Corp. 2012 */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
/* */
/* p1 */
/* */
@@ -23,7 +23,7 @@
#ifndef __PORE_INLINE_H__
#define __PORE_INLINE_H__
-// $Id: pore_inline.h,v 1.16 2012/11/12 19:54:15 bcbrock Exp $
+// $Id: pore_inline.h,v 1.18 2013/02/06 01:10:35 bcbrock Exp $
// ** WARNING : This file is maintained as part of the OCC firmware. Do **
// ** not edit this file in the PMX area or the hardware procedure area **
@@ -53,6 +53,19 @@ extern "C" {
#ifndef __ASSEMBLER__
+// PHYP tools do not support 'static' functions and variables as it interferes
+// with their concurrent patch methodology. So when compiling for PHYP the
+// PORE instruction "macros" are simply declared "inline". This also extends
+// into the implementation C files - so under PHYP all previously local static
+// functions will now be global functions. We retain 'static' to reduce code
+// size and improve abstraction for OCC applications.
+
+#ifdef PPC_HYP
+#define PORE_STATIC
+#else
+#define PORE_STATIC static
+#endif
+
/// Error code strings from the PORE inline assembler/disassembler
///
/// The PoreInlineContext object stores error codes that occur during
@@ -543,25 +556,25 @@ pore_inline_disassemble(PoreInlineContext *ctx, PoreInlineDisassembly *dis);
// Native PORE instruction assembly, using PGAS opcode names and operand
-// ordering rules.
+// ordering rules.
// NOP, TRAP, RET
-static inline int
+PORE_STATIC inline int
pore_NOP(PoreInlineContext *ctx)
{
return pore_inline_instruction1(ctx, PGAS_OPCODE_NOP, 0);
}
-static inline int
+PORE_STATIC inline int
pore_TRAP(PoreInlineContext *ctx)
{
return pore_inline_instruction1(ctx, PGAS_OPCODE_TRAP, 0);
}
-static inline int
+PORE_STATIC inline int
pore_RET(PoreInlineContext *ctx)
{
return pore_inline_instruction1(ctx, PGAS_OPCODE_RET, 0);
@@ -573,7 +586,7 @@ pore_RET(PoreInlineContext *ctx)
int
pore_WAITS(PoreInlineContext *ctx, uint32_t cycles);
-static inline int
+PORE_STATIC inline int
pore_HALT(PoreInlineContext *ctx)
{
return pore_inline_instruction1(ctx, PGAS_OPCODE_WAITS, 0);
@@ -585,19 +598,19 @@ pore_HOOKI(PoreInlineContext *ctx, uint32_t index, uint64_t imm);
// BRA, BSR, LOOP
-static inline int
+PORE_STATIC inline int
pore_BRA(PoreInlineContext *ctx, PoreInlineLocation target)
{
return pore_inline_bra(ctx, PGAS_OPCODE_BRA, target);
}
-static inline int
+PORE_STATIC inline int
pore_BSR(PoreInlineContext *ctx, PoreInlineLocation target)
{
return pore_inline_bra(ctx, PGAS_OPCODE_BSR, target);
}
-static inline int
+PORE_STATIC inline int
pore_LOOP(PoreInlineContext *ctx, PoreInlineLocation target)
{
return pore_inline_bra(ctx, PGAS_OPCODE_LOOP, target);
@@ -606,14 +619,14 @@ pore_LOOP(PoreInlineContext *ctx, PoreInlineLocation target)
// BRAZ, BRANZ
-static inline int
+PORE_STATIC inline int
pore_BRAZ(PoreInlineContext *ctx, int reg, PoreInlineLocation target)
{
return pore_inline_brac(ctx, PGAS_OPCODE_BRAZ, reg, target);
}
-static inline int
+PORE_STATIC inline int
pore_BRANZ(PoreInlineContext *ctx, int reg, PoreInlineLocation target)
{
return pore_inline_brac(ctx, PGAS_OPCODE_BRANZ, reg, target);
@@ -622,7 +635,7 @@ pore_BRANZ(PoreInlineContext *ctx, int reg, PoreInlineLocation target)
// CMPIBRAEQ, CMPIBRANE, CMPIBSREQ
-static inline int
+PORE_STATIC inline int
pore_CMPIBRAEQ(PoreInlineContext *ctx,
int reg, PoreInlineLocation target, uint64_t imm)
{
@@ -630,7 +643,7 @@ pore_CMPIBRAEQ(PoreInlineContext *ctx,
}
-static inline int
+PORE_STATIC inline int
pore_CMPIBRANE(PoreInlineContext *ctx,
int reg, PoreInlineLocation target, uint64_t imm)
{
@@ -638,7 +651,7 @@ pore_CMPIBRANE(PoreInlineContext *ctx,
}
-static inline int
+PORE_STATIC inline int
pore_CMPIBSREQ(PoreInlineContext *ctx,
int reg, PoreInlineLocation target, uint64_t imm)
{
@@ -648,12 +661,12 @@ pore_CMPIBSREQ(PoreInlineContext *ctx,
// BRAD, BSRD
-static inline int
+PORE_STATIC inline int
pore_BRAD(PoreInlineContext *ctx, int reg) {
return pore_inline_brad(ctx, PGAS_OPCODE_BRAD, reg);
}
-static inline int
+PORE_STATIC inline int
pore_BSRD(PoreInlineContext *ctx, int reg) {
return pore_inline_brad(ctx, PGAS_OPCODE_BSRD, reg);
}
@@ -661,19 +674,19 @@ pore_BSRD(PoreInlineContext *ctx, int reg) {
// ANDI, ORI, XORI
-static inline int
+PORE_STATIC inline int
pore_ANDI(PoreInlineContext *ctx, int dest, int src, uint64_t imm)
{
return pore_inline_ilogic(ctx, PGAS_OPCODE_ANDI, dest, src, imm);
}
-static inline int
+PORE_STATIC inline int
pore_ORI(PoreInlineContext *ctx, int dest, int src, uint64_t imm)
{
return pore_inline_ilogic(ctx, PGAS_OPCODE_ORI, dest, src, imm);
}
-static inline int
+PORE_STATIC inline int
pore_XORI(PoreInlineContext *ctx, int dest, int src, uint64_t imm)
{
return pore_inline_ilogic(ctx, PGAS_OPCODE_XORI, dest, src, imm);
@@ -682,31 +695,31 @@ pore_XORI(PoreInlineContext *ctx, int dest, int src, uint64_t imm)
// AND, OR, XOR, ADD, SUB
-static inline int
+PORE_STATIC inline int
pore_AND(PoreInlineContext *ctx, int dest, int src1, int src2)
{
return pore_inline_alurr(ctx, PGAS_OPCODE_AND, dest, src1, src2);
}
-static inline int
+PORE_STATIC inline int
pore_OR(PoreInlineContext *ctx, int dest, int src1, int src2)
{
return pore_inline_alurr(ctx, PGAS_OPCODE_OR, dest, src1, src2);
}
-static inline int
+PORE_STATIC inline int
pore_XOR(PoreInlineContext *ctx, int dest, int src1, int src2)
{
return pore_inline_alurr(ctx, PGAS_OPCODE_XOR, dest, src1, src2);
}
-static inline int
+PORE_STATIC inline int
pore_ADD(PoreInlineContext *ctx, int dest, int src1, int src2)
{
return pore_inline_alurr(ctx, PGAS_OPCODE_ADD, dest, src1, src2);
}
-static inline int
+PORE_STATIC inline int
pore_SUB(PoreInlineContext *ctx, int dest, int src1, int src2)
{
return pore_inline_alurr(ctx, PGAS_OPCODE_SUB, dest, src1, src2);
@@ -715,13 +728,13 @@ pore_SUB(PoreInlineContext *ctx, int dest, int src1, int src2)
// ADDS, SUBS
-static inline int
+PORE_STATIC inline int
pore_ADDS(PoreInlineContext *ctx, int dest, int src, int imm)
{
return pore_inline_adds(ctx, PGAS_OPCODE_ADDS, dest, src, imm);
}
-static inline int
+PORE_STATIC inline int
pore_SUBS(PoreInlineContext *ctx, int dest, int src, int imm)
{
return pore_inline_adds(ctx, PGAS_OPCODE_SUBS, dest, src, imm);
@@ -748,7 +761,7 @@ pore_LI(PoreInlineContext *ctx, int dest, uint64_t imm);
// LD, LDANDI, STD, STI, BSI, BCI
-static inline int
+PORE_STATIC inline int
pore_LD(PoreInlineContext *ctx, int dest, int32_t offset, int base)
{
return
@@ -756,7 +769,7 @@ pore_LD(PoreInlineContext *ctx, int dest, int32_t offset, int base)
PORE_INLINE_PSEUDO_LD, dest, offset, base, 0);
}
-static inline int
+PORE_STATIC inline int
pore_LDANDI(PoreInlineContext *ctx,
int dest, int32_t offset, int base, uint64_t imm)
{
@@ -766,7 +779,7 @@ pore_LDANDI(PoreInlineContext *ctx,
dest, offset, base, imm);
}
-static inline int
+PORE_STATIC inline int
pore_STD(PoreInlineContext *ctx, int src, int32_t offset, int base)
{
return
@@ -774,7 +787,7 @@ pore_STD(PoreInlineContext *ctx, int src, int32_t offset, int base)
PORE_INLINE_PSEUDO_STD, src, offset, base, 0);
}
-static inline int
+PORE_STATIC inline int
pore_STI(PoreInlineContext *ctx, int32_t offset, int base, uint64_t imm)
{
return
@@ -782,7 +795,7 @@ pore_STI(PoreInlineContext *ctx, int32_t offset, int base, uint64_t imm)
PGAS_OPCODE_STI, 0, offset, base, imm);
}
-static inline int
+PORE_STATIC inline int
pore_BSI(PoreInlineContext *ctx,
int src, int32_t offset, int base, uint64_t imm)
{
@@ -791,7 +804,7 @@ pore_BSI(PoreInlineContext *ctx,
PGAS_OPCODE_BSI, src, offset, base, imm);
}
-static inline int
+PORE_STATIC inline int
pore_BCI(PoreInlineContext *ctx,
int src, int32_t offset, int base, uint64_t imm)
{
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline_assembler.c b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline_assembler.c
index 096595a78..492d25bd4 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline_assembler.c
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline_assembler.c
@@ -5,7 +5,7 @@
/* */
/* IBM CONFIDENTIAL */
/* */
-/* COPYRIGHT International Business Machines Corp. 2012 */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
/* */
/* p1 */
/* */
@@ -20,7 +20,7 @@
/* Origin: 30 */
/* */
/* IBM_PROLOG_END_TAG */
-// $Id: pore_inline_assembler.c,v 1.15 2012/11/12 19:54:15 bcbrock Exp $
+// $Id: pore_inline_assembler.c,v 1.18 2013/02/06 17:41:51 bcbrock Exp $
// ** WARNING : This file is maintained as part of the OCC firmware. Do **
// ** not edit this file in the PMX area or the hardware procedure area **
@@ -368,6 +368,9 @@
/// section will be disassembled as data. For complete information see the
/// documentation for pore_inline_disassemble().
+#ifdef PPC_HYP
+#include <HvPlicModule.H>
+#endif
#define __PORE_INLINE_ASSEMBLER_C__
#include "pore_inline.h"
@@ -376,7 +379,7 @@
// Definitions of PORE register classes. These are predicates that return
// 1 if the register is a member of the class, else 0.
-static int
+PORE_STATIC int
pore_data(int reg)
{
return
@@ -385,7 +388,7 @@ pore_data(int reg)
}
-static int
+PORE_STATIC int
pore_address(int reg)
{
return
@@ -394,7 +397,7 @@ pore_address(int reg)
}
-static int
+PORE_STATIC int
pore_pervasive_chiplet_id(int reg)
{
return
@@ -403,7 +406,7 @@ pore_pervasive_chiplet_id(int reg)
}
-static int
+PORE_STATIC int
pore_branch_compare_data(int reg)
{
return
@@ -413,7 +416,7 @@ pore_branch_compare_data(int reg)
}
-static int
+PORE_STATIC int
pore_ls_destination(int reg)
{
return
@@ -427,7 +430,7 @@ pore_ls_destination(int reg)
}
-static int
+PORE_STATIC int
pore_li_destination(int reg)
{
return
@@ -441,7 +444,7 @@ pore_li_destination(int reg)
}
-static int
+PORE_STATIC int
pore_mr_source(int reg)
{
return
@@ -459,7 +462,7 @@ pore_mr_source(int reg)
(reg == EMR);
}
-static int
+PORE_STATIC int
pore_mr_destination(int reg)
{
return
@@ -564,12 +567,11 @@ pore_inline_host64(unsigned long p)
// 32-bit population count
//
-// This is a well-known divide-and-conquer algorithm, e.g. look on Wikipedia
-// under "Hamming Weight". The idea is to compute sums of adjacent bit
-// segments in parallel, in place.
+// This is a well-known divide-and-conquer algorithm. The idea is to compute
+// sums of adjacent bit segments in parallel, in place.
-static int
-popcount32(uint32_t x)
+PORE_STATIC int
+pore_popcount32(uint32_t x)
{
uint32_t m1 = 0x55555555;
uint32_t m2 = 0x33333333;
@@ -584,10 +586,10 @@ popcount32(uint32_t x)
// 64-bit population count
-static int
-popcount64(uint64_t x)
+PORE_STATIC int
+pore_popcount64(uint64_t x)
{
- return popcount32(x & 0xffffffff) + popcount32(x >> 32);
+ return pore_popcount32(x & 0xffffffff) + pore_popcount32(x >> 32);
}
@@ -596,7 +598,7 @@ popcount64(uint64_t x)
int
pore_inline_parity(uint32_t instruction, uint64_t imd64)
{
- return (popcount32(instruction) + popcount64(imd64)) % 2;
+ return (pore_popcount32(instruction) + pore_popcount64(imd64)) % 2;
}
@@ -799,7 +801,7 @@ pore_inline_context_bump(PoreInlineContext *ctx, size_t bytes)
// Allocation failure sets the context error code to either
// PORE_INLINE_NO_MEMORY or PORE_INLINE_ALIGNMENT_ERROR.
-static unsigned long
+PORE_STATIC unsigned long
pore_inline_allocate(PoreInlineContext *ctx, size_t bytes)
{
unsigned long p = 0;
@@ -1229,7 +1231,7 @@ pore_LI(PoreInlineContext *ctx, int dest, uint64_t imm)
// LD, LDANDI, STD, STI, BSI, BCI
-static void
+PORE_STATIC void
pervasive_ima24(PoreInlineContext *ctx,
int opcode, uint32_t offset, int base, uint64_t imm)
{
@@ -1254,7 +1256,7 @@ pervasive_ima24(PoreInlineContext *ctx,
}
-static void
+PORE_STATIC void
memory_ima24(PoreInlineContext *ctx,
int opcode, uint32_t offset, int base, uint64_t imm)
{
@@ -1281,7 +1283,7 @@ memory_ima24(PoreInlineContext *ctx,
}
-static void
+PORE_STATIC void
ima24(PoreInlineContext *ctx,
int opcode, uint32_t offset, int base, uint64_t imm)
{
@@ -1294,7 +1296,6 @@ ima24(PoreInlineContext *ctx,
}
}
-#include <stdio.h>
int
pore_inline_load_store(PoreInlineContext *ctx,
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.c b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.c
index 0160b7bd9..0e78bf540 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.c
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.c
@@ -5,7 +5,7 @@
/* */
/* IBM CONFIDENTIAL */
/* */
-/* COPYRIGHT International Business Machines Corp. 2012 */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
/* */
/* p1 */
/* */
@@ -20,7 +20,7 @@
/* Origin: 30 */
/* */
/* IBM_PROLOG_END_TAG */
-// $Id: sbe_xip_image.c,v 1.21 2012/09/18 20:16:49 bcbrock Exp $
+// $Id: sbe_xip_image.c,v 1.26 2013/03/13 23:28:17 cmolsen Exp $
// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/sbe/sbe_xip_image.c,v $
//-----------------------------------------------------------------------------
// *! (C) Copyright International Business Machines Corp. 2011
@@ -44,6 +44,10 @@
/// ensure that no memory outside of the putative bounds of the image is ever
/// referenced during validation.
+#ifdef PPC_HYP
+#include <HvPlicModule.H>
+#endif
+
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
@@ -55,6 +59,29 @@
// Local Functions
////////////////////////////////////////////////////////////////////////////
+// PHYP has their own way of implementing the <string.h> functions. PHYP also
+// does not allow static functions or data, so all of the XIP_STATIC functions
+// defined here are global to PHYP.
+
+#ifdef PPC_HYP
+
+#ifndef _HVTYPES_H
+#include <HvTypes.H>
+#endif
+
+#define strcpy(dest, src) hvstrcpy(dest, src)
+#define strlen(s) hvstrlen(s)
+#define strcmp(s1, s2) hvstrcmp(s1, s2)
+
+#define XIP_STATIC
+
+#else // PPC_HYP
+
+#define XIP_STATIC static
+
+#endif // PPC_HYP
+
+
#ifdef DEBUG_SBE_XIP_IMAGE
// Debugging support, normally disabled. All of the formatted I/O you see in
@@ -83,7 +110,7 @@
#define F0x016llx "0x%016" PRIx64
#define F0x012llx "0x%012" PRIx64
-static SBE_XIP_ERROR_STRINGS(sbe_xip_error_strings);
+XIP_STATIC SBE_XIP_ERROR_STRINGS(sbe_xip_error_strings);
#define TRACE_ERROR(x) \
({ \
@@ -106,11 +133,11 @@ static SBE_XIP_ERROR_STRINGS(sbe_xip_error_strings);
#if 0
-static uint32_t revle32(const uint32_t i_x);
+XIP_STATIC uint32_t xipRevLe32(const uint32_t i_x);
-static SBE_XIP_TYPE_STRINGS(type_strings);
+XIP_STATIC SBE_XIP_TYPE_STRINGS(type_strings);
-static void
+XIP_STATIC void
dumpToc(int index, SbeXipToc* toc)
{
printf("TOC entry %d @ %p\n"
@@ -120,8 +147,8 @@ dumpToc(int index, SbeXipToc* toc)
" iv_section = 0x%02x\n"
" iv_elements = %d\n",
index, toc,
- revle32(toc->iv_id),
- revle32(toc->iv_data),
+ xipRevLe32(toc->iv_id),
+ xipRevLe32(toc->iv_data),
SBE_XIP_TYPE_STRING(type_strings, toc->iv_type),
toc->iv_section,
toc->iv_elements);
@@ -131,7 +158,7 @@ dumpToc(int index, SbeXipToc* toc)
#if 0
-static void
+XIP_STATIC void
dumpItem(SbeXipItem* item)
{
printf("SbeXipItem @ %p\n"
@@ -153,7 +180,7 @@ dumpItem(SbeXipItem* item)
#endif /* 0 */
-static void
+XIP_STATIC void
dumpSectionTable(const void* i_image)
{
int i, rc;
@@ -193,8 +220,8 @@ dumpSectionTable(const void* i_image)
/// Byte-reverse a 16-bit integer if on a little-endian machine
-static uint16_t
-revle16(const uint16_t i_x)
+XIP_STATIC uint16_t
+xipRevLe16(const uint16_t i_x)
{
uint16_t rx;
@@ -214,8 +241,8 @@ revle16(const uint16_t i_x)
/// Byte-reverse a 32-bit integer if on a little-endian machine
-static uint32_t
-revle32(const uint32_t i_x)
+XIP_STATIC uint32_t
+xipRevLe32(const uint32_t i_x)
{
uint32_t rx;
@@ -237,8 +264,8 @@ revle32(const uint32_t i_x)
/// Byte-reverse a 64-bit integer if on a little-endian machine
-static uint64_t
-revle64(const uint64_t i_x)
+XIP_STATIC uint64_t
+xipRevLe64(const uint64_t i_x)
{
uint64_t rx;
@@ -264,52 +291,52 @@ revle64(const uint64_t i_x)
/// What is the image link address?
-static uint64_t
-linkAddress(const void* i_image)
+XIP_STATIC uint64_t
+xipLinkAddress(const void* i_image)
{
- return revle64(((SbeXipHeader*)i_image)->iv_linkAddress);
+ return xipRevLe64(((SbeXipHeader*)i_image)->iv_linkAddress);
}
/// What is the image size?
-static uint32_t
-imageSize(const void* i_image)
+XIP_STATIC uint32_t
+xipImageSize(const void* i_image)
{
- return revle32(((SbeXipHeader*)i_image)->iv_imageSize);
+ return xipRevLe32(((SbeXipHeader*)i_image)->iv_imageSize);
}
/// Set the image size
-static void
-setImageSize(void* io_image, const size_t i_size)
+XIP_STATIC void
+xipSetImageSize(void* io_image, const size_t i_size)
{
- ((SbeXipHeader*)io_image)->iv_imageSize = revle32(i_size);
+ ((SbeXipHeader*)io_image)->iv_imageSize = xipRevLe32(i_size);
}
/// Re-establish the required final alignment
-static void
-finalAlignment(void* io_image)
+XIP_STATIC void
+xipFinalAlignment(void* io_image)
{
uint32_t size;
- size = imageSize(io_image);
+ size = xipImageSize(io_image);
if ((size % SBE_XIP_FINAL_ALIGNMENT) != 0) {
- setImageSize(io_image,
- size + (SBE_XIP_FINAL_ALIGNMENT -
- (size % SBE_XIP_FINAL_ALIGNMENT)));
+ xipSetImageSize(io_image,
+ size + (SBE_XIP_FINAL_ALIGNMENT -
+ (size % SBE_XIP_FINAL_ALIGNMENT)));
}
}
/// Compute a host address from an image address and offset
-static void*
-hostAddressFromOffset(const void* i_image, const uint32_t offset)
+XIP_STATIC void*
+xipHostAddressFromOffset(const void* i_image, const uint32_t offset)
{
return (void*)((unsigned long)i_image + offset);
}
@@ -317,29 +344,34 @@ hostAddressFromOffset(const void* i_image, const uint32_t offset)
/// Convert a PORE address to a host address
-static void*
-pore2Host(const void* i_image, const uint64_t i_poreAddress)
+XIP_STATIC void*
+xipPore2Host(const void* i_image, const uint64_t i_poreAddress)
{
- return hostAddressFromOffset(i_image,
- i_poreAddress - linkAddress(i_image));
+ return xipHostAddressFromOffset(i_image,
+ i_poreAddress - xipLinkAddress(i_image));
}
-static int
-validatePoreAddress(const void* i_image,
- const uint64_t i_poreAddress,
- const uint32_t size)
+XIP_STATIC int
+xipValidatePoreAddress(const void* i_image,
+ const uint64_t i_poreAddress,
+ const uint32_t size)
{
int rc;
- if ((i_poreAddress < linkAddress(i_image)) ||
- (i_poreAddress > (linkAddress(i_image) + imageSize(i_image) - size))) {
+ if ((i_poreAddress < xipLinkAddress(i_image)) ||
+ (i_poreAddress > (xipLinkAddress(i_image) +
+ xipImageSize(i_image) -
+ size))) {
rc = TRACE_ERRORX(SBE_XIP_INVALID_ARGUMENT,
- "The PORE address " F0x012llx " is outside the bounds "
- "of the image (" F0x012llx ":" F0x012llx ") for %u-byte access.\n",
+ "The PORE address " F0x012llx
+ " is outside the bounds "
+ "of the image ("
+ F0x012llx ":" F0x012llx
+ ") for %u-byte access.\n",
i_poreAddress,
- linkAddress(i_image),
- linkAddress(i_image) + imageSize(i_image) - 1,
+ xipLinkAddress(i_image),
+ xipLinkAddress(i_image) + xipImageSize(i_image) - 1,
size);
} else {
rc = 0;
@@ -350,17 +382,17 @@ validatePoreAddress(const void* i_image,
/// Get the magic number from the image
-static uint64_t
-magic(const void* i_image)
+XIP_STATIC uint64_t
+xipMagic(const void* i_image)
{
- return revle64(((SbeXipHeader*)i_image)->iv_magic);
+ return xipRevLe64(((SbeXipHeader*)i_image)->iv_magic);
}
/// Get the header version from the image
-static uint8_t
-headerVersion(const void* i_image)
+XIP_STATIC uint8_t
+xipHeaderVersion(const void* i_image)
{
return ((SbeXipHeader*)i_image)->iv_headerVersion;
}
@@ -368,8 +400,8 @@ headerVersion(const void* i_image)
/// Has the image been normalized?
-static uint8_t
-normalized(const void* i_image)
+XIP_STATIC uint8_t
+xipNormalized(const void* i_image)
{
return ((SbeXipHeader*)i_image)->iv_normalized;
}
@@ -377,8 +409,8 @@ normalized(const void* i_image)
/// Has the image TOC been sorted?
-static uint8_t
-sorted(const void* i_image)
+XIP_STATIC uint8_t
+xipSorted(const void* i_image)
{
return ((SbeXipHeader*)i_image)->iv_tocSorted;
}
@@ -387,8 +419,8 @@ sorted(const void* i_image)
/// A quick check that the image exists, has the correct magic and header
/// version, and optionally is normalized.
-static int
-quickCheck(const void* i_image, const int i_normalizationRequired)
+XIP_STATIC int
+xipQuickCheck(const void* i_image, const int i_normalizationRequired)
{
int rc;
@@ -400,21 +432,22 @@ quickCheck(const void* i_image, const int i_normalizationRequired)
"Image pointer is NULL (0)\n");
break;
}
- if ((magic(i_image) >> 32) != SBE_XIP_MAGIC) {
+ if ((xipMagic(i_image) >> 32) != SBE_XIP_MAGIC) {
rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
"Magic number mismatch; Found "
"" F0x016llx ", expected 0x%08x........\n",
- magic(i_image), SBE_XIP_MAGIC);
+ xipMagic(i_image), SBE_XIP_MAGIC);
break;
}
- if ((headerVersion(i_image)) != SBE_XIP_HEADER_VERSION) {
+ if ((xipHeaderVersion(i_image)) != SBE_XIP_HEADER_VERSION) {
rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
"Header version mismatch; Expecting %d, "
"found %d\n",
- SBE_XIP_HEADER_VERSION, headerVersion(i_image));
+ SBE_XIP_HEADER_VERSION,
+ xipHeaderVersion(i_image));
break;
}
- if (i_normalizationRequired && !normalized(i_image)) {
+ if (i_normalizationRequired && !xipNormalized(i_image)) {
rc = TRACE_ERRORX(SBE_XIP_NOT_NORMALIZED,
"Image not normalized\n");
break;
@@ -427,17 +460,17 @@ quickCheck(const void* i_image, const int i_normalizationRequired)
/// Convert a 32-bit relocatable offset to a full PORE 48-bit address
-static uint64_t
-fullAddress(const void* i_image, uint32_t offset)
+XIP_STATIC uint64_t
+xipFullAddress(const void* i_image, uint32_t offset)
{
- return (linkAddress(i_image) & 0x0000ffff00000000ull) + offset;
+ return (xipLinkAddress(i_image) & 0x0000ffff00000000ull) + offset;
}
/// Translate a section table entry
-static void
-translateSection(SbeXipSection* o_dest, const SbeXipSection* i_src)
+XIP_STATIC void
+xipTranslateSection(SbeXipSection* o_dest, const SbeXipSection* i_src)
{
#ifndef _BIG_ENDIAN
@@ -445,8 +478,8 @@ translateSection(SbeXipSection* o_dest, const SbeXipSection* i_src)
#error This code assumes the SBE-XIP header version 8 layout
#endif
- o_dest->iv_offset = revle32(i_src->iv_offset);
- o_dest->iv_size = revle32(i_src->iv_size);
+ o_dest->iv_offset = xipRevLe32(i_src->iv_offset);
+ o_dest->iv_size = xipRevLe32(i_src->iv_size);
o_dest->iv_alignment = i_src->iv_alignment;
o_dest->iv_reserved8[0] = 0;
o_dest->iv_reserved8[1] = 0;
@@ -461,8 +494,8 @@ translateSection(SbeXipSection* o_dest, const SbeXipSection* i_src)
/// Translate a TOC entry
-static void
-translateToc(SbeXipToc* o_dest, SbeXipToc* i_src)
+XIP_STATIC void
+xipTranslateToc(SbeXipToc* o_dest, SbeXipToc* i_src)
{
#ifndef _BIG_ENDIAN
@@ -470,8 +503,8 @@ translateToc(SbeXipToc* o_dest, SbeXipToc* i_src)
#error This code assumes the SBE-XIP header version 8 layout
#endif
- o_dest->iv_id = revle32(i_src->iv_id);
- o_dest->iv_data = revle32(i_src->iv_data);
+ o_dest->iv_id = xipRevLe32(i_src->iv_id);
+ o_dest->iv_data = xipRevLe32(i_src->iv_data);
o_dest->iv_type = i_src->iv_type;
o_dest->iv_section = i_src->iv_section;
o_dest->iv_elements = i_src->iv_elements;
@@ -486,8 +519,8 @@ translateToc(SbeXipToc* o_dest, SbeXipToc* i_src)
/// Find the final (highest-address) section of the image
-static int
-finalSection(const void* i_image, int* o_sectionId)
+XIP_STATIC int
+xipFinalSection(const void* i_image, int* o_sectionId)
{
int i, rc;
uint32_t offset;
@@ -514,10 +547,10 @@ finalSection(const void* i_image, int* o_sectionId)
/// Return a pointer to an image-format section table entry
-static int
-getSectionPointer(const void* i_image,
- const int i_sectionId,
- SbeXipSection** o_imageSection)
+XIP_STATIC int
+xipGetSectionPointer(const void* i_image,
+ const int i_sectionId,
+ SbeXipSection** o_imageSection)
{
int rc;
@@ -534,18 +567,18 @@ getSectionPointer(const void* i_image,
/// Restore a section table entry from host format to image format.
-static int
-putSection(const void* i_image,
- const int i_sectionId,
- SbeXipSection* i_hostSection)
+XIP_STATIC int
+xipPutSection(const void* i_image,
+ const int i_sectionId,
+ SbeXipSection* i_hostSection)
{
int rc;
SbeXipSection *imageSection;
- rc = getSectionPointer(i_image, i_sectionId, &imageSection);
+ rc = xipGetSectionPointer(i_image, i_sectionId, &imageSection);
if (!rc) {
- translateSection(imageSection, i_hostSection);
+ xipTranslateSection(imageSection, i_hostSection);
}
return rc;
@@ -554,15 +587,16 @@ putSection(const void* i_image,
/// Set the offset of a section
-static int
-setSectionOffset(void* io_image, const int i_section, const uint32_t i_offset)
+XIP_STATIC int
+xipSetSectionOffset(void* io_image, const int i_section,
+ const uint32_t i_offset)
{
SbeXipSection* section;
int rc;
- rc = getSectionPointer(io_image, i_section, &section);
+ rc = xipGetSectionPointer(io_image, i_section, &section);
if (!rc) {
- section->iv_offset = revle32(i_offset);
+ section->iv_offset = xipRevLe32(i_offset);
}
return rc;
}
@@ -570,15 +604,15 @@ setSectionOffset(void* io_image, const int i_section, const uint32_t i_offset)
/// Set the size of a section
-static int
-setSectionSize(void* io_image, const int i_section, const uint32_t i_size)
+XIP_STATIC int
+xipSetSectionSize(void* io_image, const int i_section, const uint32_t i_size)
{
SbeXipSection* section;
int rc;
- rc = getSectionPointer(io_image, i_section, &section);
+ rc = xipGetSectionPointer(io_image, i_section, &section);
if (!rc) {
- section->iv_size = revle32(i_size);
+ section->iv_size = xipRevLe32(i_size);
}
return rc;
}
@@ -591,11 +625,11 @@ setSectionSize(void* io_image, const int i_section, const uint32_t i_size)
// section contains the address - if none then the image is corrupted. We can
// (must) use the 32-bit offset form of the address here.
-static int
-pore2Section(const void* i_image,
- const uint64_t i_poreAddress,
- int* o_section,
- uint32_t* o_offset)
+XIP_STATIC int
+xipPore2Section(const void* i_image,
+ const uint64_t i_poreAddress,
+ int* o_section,
+ uint32_t* o_offset)
{
int rc, sectionId;
SbeXipSection section;
@@ -604,19 +638,20 @@ pore2Section(const void* i_image,
do {
rc = 0;
- if ((i_poreAddress < linkAddress(i_image)) ||
- (i_poreAddress > (linkAddress(i_image) + imageSize(i_image)))) {
+ if ((i_poreAddress < xipLinkAddress(i_image)) ||
+ (i_poreAddress >
+ (xipLinkAddress(i_image) + xipImageSize(i_image)))) {
rc = TRACE_ERRORX(SBE_XIP_INVALID_ARGUMENT,
"pore2section: The i_poreAddress argument "
"(" F0x016llx ")\nis outside the bounds of the "
"image (" F0x016llx ":" F0x016llx ")\n",
i_poreAddress,
- linkAddress(i_image),
- linkAddress(i_image) + imageSize(i_image));
+ xipLinkAddress(i_image),
+ xipLinkAddress(i_image) + xipImageSize(i_image));
break;
}
- addressOffset = (i_poreAddress - linkAddress(i_image)) & 0xffffffff;
+ addressOffset = (i_poreAddress - xipLinkAddress(i_image)) & 0xffffffff;
for (sectionId = 0; sectionId < SBE_XIP_SECTIONS; sectionId++) {
rc = sbe_xip_get_section(i_image, sectionId, &section);
@@ -655,12 +690,12 @@ pore2Section(const void* i_image,
///
/// All return values are optional.
-static int
-getToc(void* i_image,
- SbeXipToc** o_toc,
- size_t* o_entries,
- int* o_sorted,
- char** o_strings)
+XIP_STATIC int
+xipGetToc(void* i_image,
+ SbeXipToc** o_toc,
+ size_t* o_entries,
+ int* o_sorted,
+ char** o_strings)
{
int rc;
SbeXipSection tocSection, stringsSection;
@@ -670,7 +705,7 @@ getToc(void* i_image,
if (rc) break;
rc = sbe_xip_get_section(i_image, SBE_XIP_SECTION_STRINGS,
- &stringsSection);
+ &stringsSection);
if (rc) break;
if (o_toc) {
@@ -680,7 +715,7 @@ getToc(void* i_image,
*o_entries = tocSection.iv_size / sizeof(SbeXipToc);
}
if (o_sorted) {
- *o_sorted = sorted(i_image);
+ *o_sorted = xipSorted(i_image);
}
if (o_strings) {
*o_strings = (char*)i_image + stringsSection.iv_offset;
@@ -692,12 +727,12 @@ getToc(void* i_image,
/// Compare two normalized TOC entries for sorting.
-static int
-compareToc(const SbeXipToc* i_a, const SbeXipToc* i_b,
- const char* i_strings)
+XIP_STATIC int
+xipCompareToc(const SbeXipToc* i_a, const SbeXipToc* i_b,
+ const char* i_strings)
{
- return strcmp(i_strings + revle32(i_a->iv_id),
- i_strings + revle32(i_b->iv_id));
+ return strcmp(i_strings + xipRevLe32(i_a->iv_id),
+ i_strings + xipRevLe32(i_b->iv_id));
}
@@ -705,9 +740,9 @@ compareToc(const SbeXipToc* i_a, const SbeXipToc* i_b,
// Note: The stack requirement is limited to 256 bytes + minor local storage.
-static void
-quickSort(SbeXipToc* io_toc, int i_left, int i_right,
- const char* i_strings)
+XIP_STATIC void
+xipQuickSort(SbeXipToc* io_toc, int i_left, int i_right,
+ const char* i_strings)
{
int i, j, left, right, sp;
SbeXipToc pivot, temp;
@@ -728,10 +763,10 @@ quickSort(SbeXipToc* io_toc, int i_left, int i_right,
pivot = io_toc[(i + j) / 2];
while (i <= j) {
- while (compareToc(&(io_toc[i]), &pivot, i_strings) < 0) {
+ while (xipCompareToc(&(io_toc[i]), &pivot, i_strings) < 0) {
i++;
}
- while (compareToc(&(io_toc[j]), &pivot, i_strings) > 0) {
+ while (xipCompareToc(&(io_toc[j]), &pivot, i_strings) > 0) {
j--;
}
if (i <= j) {
@@ -756,8 +791,8 @@ quickSort(SbeXipToc* io_toc, int i_left, int i_right,
/// TOC linear search
-static int
-linearSearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
+XIP_STATIC int
+xipLinearSearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
{
int rc;
SbeXipToc *imageToc, hostToc;
@@ -765,10 +800,10 @@ linearSearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
char* strings;
*o_entry = 0;
- rc = getToc(i_image, &imageToc, &entries, 0, &strings);
+ rc = xipGetToc(i_image, &imageToc, &entries, 0, &strings);
if (!rc) {
for (; entries; entries--, imageToc++) {
- translateToc(&hostToc, imageToc);
+ xipTranslateToc(&hostToc, imageToc);
if (strcmp(i_id, strings + hostToc.iv_id) == 0) {
break;
}
@@ -787,8 +822,8 @@ linearSearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
/// A classic binary search of a (presumed) sorted array
-static int
-binarySearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
+XIP_STATIC int
+xipBinarySearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
{
int rc;
SbeXipToc *imageToc;
@@ -799,7 +834,7 @@ binarySearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
do {
*o_entry = 0;
- rc = getToc(i_image, &imageToc, &entries, &sorted, &strings);
+ rc = xipGetToc(i_image, &imageToc, &entries, &sorted, &strings);
if (rc) break;
if (!sorted) {
@@ -811,7 +846,7 @@ binarySearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
right = entries - 1;
while (left <= right) {
next = (left + right) / 2;
- sort = strcmp(i_id, strings + revle32(imageToc[next].iv_id));
+ sort = strcmp(i_id, strings + xipRevLe32(imageToc[next].iv_id));
if (sort == 0) {
*o_entry = &(imageToc[next]);
break;
@@ -835,8 +870,8 @@ binarySearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
/// The TOC is validated by searching for the entry, which will uncover
/// duplicate entries or problems with sorting/searching.
-static int
-validateTocEntry(void* io_image, const SbeXipItem* i_item, void* io_arg)
+XIP_STATIC int
+xipValidateTocEntry(void* io_image, const SbeXipItem* i_item, void* io_arg)
{
int rc;
SbeXipItem found;
@@ -872,7 +907,7 @@ validateTocEntry(void* io_image, const SbeXipItem* i_item, void* io_arg)
#define FNV_PRIME32 16777619u
uint32_t
-hash32(const char* s)
+xipHash32(const char* s)
{
uint32_t hash;
@@ -892,10 +927,10 @@ hash32(const char* s)
// addresses in the TOC are actually 32-bit offsets in the address space named
// in bits 16:31 of the link address of the image.
-static int
-normalizeToc(void* io_image, SbeXipToc *io_imageToc,
- SbeXipHashedToc** io_fixedTocEntry,
- size_t* io_fixedEntriesRemaining)
+XIP_STATIC int
+xipNormalizeToc(void* io_image, SbeXipToc *io_imageToc,
+ SbeXipHashedToc** io_fixedTocEntry,
+ size_t* io_fixedEntriesRemaining)
{
SbeXipToc hostToc;
int idSection, dataSection;
@@ -909,15 +944,16 @@ normalizeToc(void* io_image, SbeXipToc *io_imageToc,
// sections/offsets of the Id string (which must be in .strings) and
// the data.
- translateToc(&hostToc, io_imageToc);
+ xipTranslateToc(&hostToc, io_imageToc);
hostString =
- (char*)pore2Host(io_image, fullAddress(io_image, hostToc.iv_id));
+ (char*)xipPore2Host(io_image,
+ xipFullAddress(io_image, hostToc.iv_id));
- rc = pore2Section(io_image,
- fullAddress(io_image, hostToc.iv_id),
- &idSection,
- &idOffset);
+ rc = xipPore2Section(io_image,
+ xipFullAddress(io_image, hostToc.iv_id),
+ &idSection,
+ &idOffset);
if (rc) break;
if (idSection != SBE_XIP_SECTION_STRINGS) {
@@ -925,10 +961,10 @@ normalizeToc(void* io_image, SbeXipToc *io_imageToc,
break;
}
- rc = pore2Section(io_image,
- fullAddress(io_image, hostToc.iv_data),
- &dataSection,
- &dataOffset);
+ rc = xipPore2Section(io_image,
+ xipFullAddress(io_image, hostToc.iv_data),
+ &dataSection,
+ &dataOffset);
if (rc) break;
// Now replace the Id and data pointers with their offsets, and update
@@ -953,8 +989,8 @@ normalizeToc(void* io_image, SbeXipToc *io_imageToc,
break;
}
- (*io_fixedTocEntry)->iv_hash = revle32(hash32(hostString));
- (*io_fixedTocEntry)->iv_offset = revle16(hostToc.iv_data);
+ (*io_fixedTocEntry)->iv_hash = xipRevLe32(xipHash32(hostString));
+ (*io_fixedTocEntry)->iv_offset = xipRevLe16(hostToc.iv_data);
(*io_fixedTocEntry)->iv_type = hostToc.iv_type;
(*io_fixedTocEntry)->iv_elements = hostToc.iv_elements;
@@ -964,7 +1000,7 @@ normalizeToc(void* io_image, SbeXipToc *io_imageToc,
// Finally update the TOC entry
- translateToc(io_imageToc, &hostToc);
+ xipTranslateToc(io_imageToc, &hostToc);
} while (0);
@@ -975,8 +1011,8 @@ normalizeToc(void* io_image, SbeXipToc *io_imageToc,
// Check for hash collisions in the .fixed mini-TOC. Note that endianness is
// not an issue here, as we're comparing for equality.
-static int
-hashCollision(SbeXipHashedToc* i_fixedToc, size_t i_entries)
+XIP_STATIC int
+xipHashCollision(SbeXipHashedToc* i_fixedToc, size_t i_entries)
{
int rc;
size_t i, j;
@@ -1002,17 +1038,17 @@ hashCollision(SbeXipHashedToc* i_fixedToc, size_t i_entries)
/// Decode a normalized image-format TOC entry into a host-format SbeXipItem
/// structure
-static int
-decodeToc(void* i_image,
- SbeXipToc* i_imageToc,
- SbeXipItem* o_item)
+XIP_STATIC int
+xipDecodeToc(void* i_image,
+ SbeXipToc* i_imageToc,
+ SbeXipItem* o_item)
{
int rc;
SbeXipToc hostToc;
SbeXipSection dataSection, stringsSection;
do {
- if (!normalized(i_image)) {
+ if (!xipNormalized(i_image)) {
rc = TRACE_ERROR(SBE_XIP_NOT_NORMALIZED);
break;
}
@@ -1022,7 +1058,7 @@ decodeToc(void* i_image,
// number of elements in the outgoing structure. The Id string is
// always located in the TOC_STRINGS section.
- translateToc(&hostToc, i_imageToc);
+ xipTranslateToc(&hostToc, i_imageToc);
o_item->iv_toc = i_imageToc;
o_item->iv_type = hostToc.iv_type;
@@ -1051,7 +1087,7 @@ decodeToc(void* i_image,
dataSection.iv_offset + hostToc.iv_data);
o_item->iv_address =
- linkAddress(i_image) + dataSection.iv_offset + hostToc.iv_data;
+ xipLinkAddress(i_image) + dataSection.iv_offset + hostToc.iv_data;
o_item->iv_partial = 0;
@@ -1062,8 +1098,8 @@ decodeToc(void* i_image,
/// Sort the TOC
-static int
-sortToc(void* io_image)
+XIP_STATIC int
+xipSortToc(void* io_image)
{
int rc;
SbeXipToc *hostToc;
@@ -1071,15 +1107,15 @@ sortToc(void* io_image)
char* strings;
do {
- rc = quickCheck(io_image, 1);
+ rc = xipQuickCheck(io_image, 1);
if (rc) break;
- if (sorted(io_image)) break;
+ if (xipSorted(io_image)) break;
- rc = getToc(io_image, &hostToc, &entries, 0, &strings);
+ rc = xipGetToc(io_image, &hostToc, &entries, 0, &strings);
if (rc) break;
- quickSort(hostToc, 0, entries - 1, strings);
+ xipQuickSort(hostToc, 0, entries - 1, strings);
((SbeXipHeader*)io_image)->iv_tocSorted = 1;
@@ -1093,9 +1129,9 @@ sortToc(void* io_image)
// modified to reflect the pad, but the caller must modify the section size to
// reflect the pad.
-static int
-padImage(void* io_image, uint32_t i_allocation,
- uint32_t i_align, uint32_t* pad)
+XIP_STATIC int
+xipPadImage(void* io_image, uint32_t i_allocation,
+ uint32_t i_align, uint32_t* pad)
{
int rc;
@@ -1110,18 +1146,18 @@ padImage(void* io_image, uint32_t i_allocation,
break;
}
- *pad = imageSize(io_image) % i_align;
+ *pad = xipImageSize(io_image) % i_align;
if (*pad != 0) {
*pad = i_align - *pad;
- if ((imageSize(io_image) + *pad) > i_allocation) {
+ if ((xipImageSize(io_image) + *pad) > i_allocation) {
rc = TRACE_ERROR(SBE_XIP_WOULD_OVERFLOW);
break;
}
- memset((void*)((unsigned long)io_image + imageSize(io_image)),
+ memset((void*)((unsigned long)io_image + xipImageSize(io_image)),
0, *pad);
- setImageSize(io_image, imageSize(io_image) + *pad);
+ xipSetImageSize(io_image, xipImageSize(io_image) + *pad);
}
} while (0);
@@ -1131,10 +1167,10 @@ padImage(void* io_image, uint32_t i_allocation,
// Get the .fixed_toc section
-static int
-getFixedToc(void* io_image,
- SbeXipHashedToc** o_imageToc,
- size_t* o_entries)
+XIP_STATIC int
+xipGetFixedToc(void* io_image,
+ SbeXipHashedToc** o_imageToc,
+ size_t* o_entries)
{
int rc;
SbeXipSection section;
@@ -1157,8 +1193,8 @@ getFixedToc(void* io_image,
// adequate. The TOC structures are also small so all byte-reversal is done
// 'by hand' rather than with a translate-type API.
-static int
-fixedFind(void* i_image, const char* i_id, SbeXipItem* o_item)
+XIP_STATIC int
+xipFixedFind(void* i_image, const char* i_id, SbeXipItem* o_item)
{
int rc;
SbeXipHashedToc* toc;
@@ -1168,10 +1204,10 @@ fixedFind(void* i_image, const char* i_id, SbeXipItem* o_item)
uint32_t offset;
do {
- rc = getFixedToc(i_image, &toc, &entries);
+ rc = xipGetFixedToc(i_image, &toc, &entries);
if (rc) break;
- for (hash = revle32(hash32(i_id)); entries != 0; entries--, toc++) {
+ for (hash = xipRevLe32(xipHash32(i_id)); entries != 0; entries--, toc++) {
if (toc->iv_hash == hash) break;
}
@@ -1184,7 +1220,7 @@ fixedFind(void* i_image, const char* i_id, SbeXipItem* o_item)
// The caller may have requested a lookup only (o_item == 0), in which
// case we're done. Otherwise we create a partial SbeXipItem and
- // populate the non-0 fields analogously to the decodeToc()
+ // populate the non-0 fields analogously to the xipDecodeToc()
// routine. The data resides in the .fixed section in this case.
if (o_item == 0) break;
@@ -1204,10 +1240,104 @@ fixedFind(void* i_image, const char* i_id, SbeXipItem* o_item)
break;
}
- offset = fixedSection.iv_offset + revle16(toc->iv_offset);
+ offset = fixedSection.iv_offset + xipRevLe16(toc->iv_offset);
+
+ o_item->iv_imageData = (void*)((uint8_t*)i_image + offset);
+ o_item->iv_address = xipLinkAddress(i_image) + offset;
+
+ } while (0);
+
+ return rc;
+}
+
+
+// Search for an item in the special built-in TOC of header fields, and
+// populate a partial TOC entry if requested.
+//
+// This facility was added to allow header data to be searched by name even
+// when the TOC has been stripped. This API will only be used in the case of a
+// stripped TOC since the header fields are also indexed in the main TOC.
+//
+// The table is allocated on the stack in order to make this code concurrently
+// patchable in PHYP (although PHYP applications will never use this code).
+// The table is small and unsorted so a linear search is adequate, and the
+// stack requirememts are small.
+
+XIP_STATIC int
+xipHeaderFind(void* i_image, const char* i_id, SbeXipItem* o_item)
+{
+ int rc;
+ unsigned i;
+ uint32_t offset;
+ SbeXipSection headerSection;
+
+#define HEADER_TOC(id, field, type) \
+ {#id, offsetof(SbeXipHeader, field), type}
+
+ struct HeaderToc {
+
+ const char* iv_id;
+ uint16_t iv_offset;
+ uint8_t iv_type;
+
+ } toc[] = {
+
+ HEADER_TOC(magic, iv_magic, SBE_XIP_UINT64),
+ HEADER_TOC(entry_offset, iv_entryOffset, SBE_XIP_UINT64),
+ HEADER_TOC(link_address, iv_linkAddress, SBE_XIP_UINT64),
+
+ HEADER_TOC(image_size, iv_imageSize, SBE_XIP_UINT32),
+ HEADER_TOC(build_date, iv_buildDate, SBE_XIP_UINT32),
+ HEADER_TOC(build_time, iv_buildTime, SBE_XIP_UINT32),
+
+ HEADER_TOC(header_version, iv_headerVersion, SBE_XIP_UINT8),
+ HEADER_TOC(toc_normalized, iv_normalized, SBE_XIP_UINT8),
+ HEADER_TOC(toc_sorted, iv_tocSorted, SBE_XIP_UINT8),
+
+ HEADER_TOC(build_user, iv_buildUser, SBE_XIP_STRING),
+ HEADER_TOC(build_host, iv_buildHost, SBE_XIP_STRING),
+
+ };
+
+ do {
+
+ rc = SBE_XIP_ITEM_NOT_FOUND;
+ for (i = 0; i < (sizeof(toc) / sizeof(struct HeaderToc)); i++) {
+ if (strcmp(i_id, toc[i].iv_id) == 0) {
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc) break;
+
+ // The caller may have requested a lookup only (o_item == 0), in which
+ // case we're done. Otherwise we create a partial SbeXipItem and
+ // populate the non-0 fields analogously to the xipDecodeToc()
+ // routine. The data resides in the .fixed section in this case.
+
+ if (o_item == 0) break;
+
+ o_item->iv_partial = 1;
+ o_item->iv_toc = 0;
+ o_item->iv_id = 0;
+
+ o_item->iv_type = toc[i].iv_type;
+ o_item->iv_elements = 1; /* True for now... */
+
+ rc = sbe_xip_get_section(i_image, SBE_XIP_SECTION_HEADER,
+ &headerSection);
+ if (rc) break;
+
+ if (headerSection.iv_size == 0) {
+ rc = TRACE_ERROR(SBE_XIP_DATA_NOT_PRESENT);
+ break;
+ }
+
+ offset = headerSection.iv_offset + toc[i].iv_offset;
o_item->iv_imageData = (void*)((uint8_t*)i_image + offset);
- o_item->iv_address = linkAddress(i_image) + offset;
+ o_item->iv_address = xipLinkAddress(i_image) + offset;
} while (0);
@@ -1253,13 +1383,14 @@ sbe_xip_validate(void* i_image, const uint32_t i_size)
rc = TRACE_ERRORX(SBE_XIP_BUG,
"C/Assembler size mismatch(%d/%d) "
"for SbeXipHashedToc\n",
- sizeof(SbeXipHashedToc), SIZE_OF_SBE_XIP_HASHED_TOC);
+ sizeof(SbeXipHashedToc),
+ SIZE_OF_SBE_XIP_HASHED_TOC);
break;
}
// Validate the image pointer and magic number
- rc = quickCheck(i_image, 0);
+ rc = xipQuickCheck(i_image, 0);
if (rc) break;
// Validate the image size
@@ -1335,8 +1466,135 @@ sbe_xip_validate(void* i_image, const uint32_t i_size)
size = hostHeader.iv_section[SBE_XIP_SECTION_TOC].iv_size;
if (size != 0) {
- if (normalized(i_image)) {
- rc = sbe_xip_map_toc(i_image, validateTocEntry, 0);
+ if (xipNormalized(i_image)) {
+ rc = sbe_xip_map_toc(i_image, xipValidateTocEntry, 0);
+ if (rc) break;
+ }
+ }
+ } while (0);
+ return rc;
+}
+
+
+int
+sbe_xip_validate2(void* i_image, const uint32_t i_size, const uint32_t i_maskIgnores)
+{
+ SbeXipHeader hostHeader;
+ int rc = 0, i;
+ uint32_t linkAddress, imageSize, extent, offset, size;
+ uint8_t alignment;
+
+ sbe_xip_translate_header(&hostHeader, (SbeXipHeader*)i_image);
+
+ do {
+
+ // Validate C/Assembler constraints.
+
+ if (sizeof(SbeXipSection) != SIZE_OF_SBE_XIP_SECTION) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipSection\n",
+ sizeof(SbeXipSection), SIZE_OF_SBE_XIP_SECTION);
+ break;
+ }
+
+ if (sizeof(SbeXipToc) != SIZE_OF_SBE_XIP_TOC) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipToc\n",
+ sizeof(SbeXipToc), SIZE_OF_SBE_XIP_TOC);
+ break;
+ }
+
+ if (sizeof(SbeXipHashedToc) != SIZE_OF_SBE_XIP_HASHED_TOC) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipHashedToc\n",
+ sizeof(SbeXipHashedToc),
+ SIZE_OF_SBE_XIP_HASHED_TOC);
+ break;
+ }
+
+ // Validate the image pointer and magic number
+
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ // Validate the image size
+
+ linkAddress = hostHeader.iv_linkAddress;
+ imageSize = hostHeader.iv_imageSize;
+ extent = linkAddress + imageSize;
+
+ if (imageSize < sizeof(SbeXipHeader)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "The image size recorded in the image "
+ "(%u) is smaller than the header size.\n",
+ i_image, i_size, imageSize);
+ break;
+ }
+ if (imageSize != i_size && !(i_maskIgnores & SBE_XIP_IGNORE_FILE_SIZE)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "The image size recorded in the image "
+ "(%u) does not match the i_size parameter.\n",
+ i_image, i_size, imageSize);
+ break;
+ }
+ if (extent <= linkAddress) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "Given the link address (%u) and the "
+ "image size, the image wraps the address space\n",
+ i_image, i_size, linkAddress);
+ break;
+ }
+ if ((imageSize % SBE_XIP_FINAL_ALIGNMENT) != 0) {
+ rc = TRACE_ERRORX(SBE_XIP_ALIGNMENT_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "The image size (%u) is not a multiple of %u\n",
+ i_image, i_size, imageSize,
+ SBE_XIP_FINAL_ALIGNMENT);
+ break;
+ }
+
+ // Validate that all sections appear to be within the image
+ // bounds, and are aligned correctly.
+
+ for (i = 0; i < SBE_XIP_SECTIONS; i++) {
+
+ offset = hostHeader.iv_section[i].iv_offset;
+ size = hostHeader.iv_section[i].iv_size;
+ alignment = hostHeader.iv_section[i].iv_alignment;
+
+ if ((offset > imageSize) ||
+ ((offset + size) > imageSize) ||
+ ((offset + size) < offset)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "Section %d does not appear to be within "
+ "the bounds of the image\n"
+ "offset = %u, size = %u, image size = %u\n",
+ i, offset, size, imageSize);
+ break;
+ }
+ if ((offset % alignment) != 0) {
+ rc = TRACE_ERRORX(SBE_XIP_ALIGNMENT_ERROR,
+ "Section %d requires %d-byte initial "
+ "alignment but the section offset is %u\n",
+ i, alignment, offset);
+ break;
+ }
+ }
+ if (rc) break;
+
+ // If the TOC exists and the image is normalized, validate each TOC
+ // entry.
+
+ size = hostHeader.iv_section[SBE_XIP_SECTION_TOC].iv_size;
+ if (size != 0) {
+ if (xipNormalized(i_image)) {
+ rc = sbe_xip_map_toc(i_image, xipValidateTocEntry, 0);
if (rc) break;
}
}
@@ -1368,23 +1626,23 @@ sbe_xip_normalize(void* io_image)
size_t tocEntries, fixedTocEntries, fixedEntriesRemaining;
do {
- rc = quickCheck(io_image, 0);
+ rc = xipQuickCheck(io_image, 0);
if (rc) break;
- if (!normalized(io_image)) {
+ if (!xipNormalized(io_image)) {
- rc = getToc(io_image, &imageToc, &tocEntries, 0, 0);
+ rc = xipGetToc(io_image, &imageToc, &tocEntries, 0, 0);
if (rc) break;
- rc = getFixedToc(io_image, &fixedImageToc, &fixedTocEntries);
+ rc = xipGetFixedToc(io_image, &fixedImageToc, &fixedTocEntries);
if (rc) break;
fixedTocEntry = fixedImageToc;
fixedEntriesRemaining = fixedTocEntries;
for (; tocEntries--; imageToc++) {
- rc = normalizeToc(io_image, imageToc,
- &fixedTocEntry, &fixedEntriesRemaining);
+ rc = xipNormalizeToc(io_image, imageToc,
+ &fixedTocEntry, &fixedEntriesRemaining);
if (rc) break;
}
@@ -1396,20 +1654,20 @@ sbe_xip_normalize(void* io_image)
break;
}
- rc = hashCollision(fixedImageToc, fixedTocEntries);
+ rc = xipHashCollision(fixedImageToc, fixedTocEntries);
if (rc) break;
((SbeXipHeader*)io_image)->iv_normalized = 1;
}
- rc = sortToc(io_image);
+ rc = xipSortToc(io_image);
if (rc) break;
for (i = 0; i < SBE_XIP_SECTIONS; i++) {
rc = sbe_xip_get_section(io_image, i, &section);
if (rc) break;
if (section.iv_size == 0) {
- setSectionOffset(io_image, i, 0);
+ xipSetSectionOffset(io_image, i, 0);
}
}
if (rc) break;
@@ -1427,9 +1685,9 @@ sbe_xip_image_size(void* io_image, uint32_t* o_size)
{
int rc;
- rc = quickCheck(io_image, 0);
+ rc = xipQuickCheck(io_image, 0);
if (!rc) {
- *o_size = imageSize(io_image);
+ *o_size = xipImageSize(io_image);
}
return rc;
}
@@ -1443,18 +1701,18 @@ sbe_xip_get_section(const void* i_image,
int rc;
SbeXipSection *imageSection;
- rc = getSectionPointer(i_image, i_sectionId, &imageSection);
+ rc = xipGetSectionPointer(i_image, i_sectionId, &imageSection);
if (!rc) {
- translateSection(o_hostSection, imageSection);
+ xipTranslateSection(o_hostSection, imageSection);
}
return rc;
}
-// If the 'big' TOC is not present, search the mini-TOC that only indexes the
-// fixed section.
+// If the 'big' TOC is not present, search the mini-TOCs that only index the
+// .fixed and .header sections.
int
sbe_xip_find(void* i_image,
@@ -1467,21 +1725,24 @@ sbe_xip_find(void* i_image,
SbeXipSection* tocSection;
do {
- rc = quickCheck(i_image, 1);
+ rc = xipQuickCheck(i_image, 1);
if (rc) break;
- rc = getSectionPointer(i_image, SBE_XIP_SECTION_TOC, &tocSection);
+ rc = xipGetSectionPointer(i_image, SBE_XIP_SECTION_TOC, &tocSection);
if (rc) break;
if (tocSection->iv_size == 0) {
- rc = fixedFind(i_image, i_id, o_item);
+ rc = xipFixedFind(i_image, i_id, o_item);
+ if (rc) {
+ rc = xipHeaderFind(i_image, i_id, o_item);
+ }
break;
}
- if (sorted(i_image)) {
- rc = binarySearch(i_image, i_id, &toc);
+ if (xipSorted(i_image)) {
+ rc = xipBinarySearch(i_image, i_id, &toc);
} else {
- rc = linearSearch(i_image, i_id, &toc);
+ rc = xipLinearSearch(i_image, i_id, &toc);
}
if (rc) break;
@@ -1490,7 +1751,7 @@ sbe_xip_find(void* i_image,
} else {
pitem = &item;
}
- rc = decodeToc(i_image, toc, pitem);
+ rc = xipDecodeToc(i_image, toc, pitem);
if (rc) break;
} while (0);
@@ -1514,7 +1775,7 @@ sbe_xip_map_halt(void* io_image,
uint32_t actualSize;
do {
- rc = quickCheck(io_image, 0);
+ rc = xipQuickCheck(io_image, 0);
if (rc) break;
rc = sbe_xip_get_section(io_image, SBE_XIP_SECTION_HALT, &haltSection);
@@ -1526,7 +1787,7 @@ sbe_xip_map_halt(void* io_image,
while (size) {
rc = i_fn(io_image,
- revle64(halt->iv_address),
+ xipRevLe64(halt->iv_address),
halt->iv_string,
io_arg);
if (rc) break;
@@ -1561,11 +1822,11 @@ typedef struct {
} GetHaltStruct;
-static int
-getHaltMap(void* io_image,
- const uint64_t i_poreAddress,
- const char* i_rcString,
- void* io_arg)
+XIP_STATIC int
+xipGetHaltMap(void* io_image,
+ const uint64_t i_poreAddress,
+ const char* i_rcString,
+ void* io_arg)
{
int rc;
@@ -1592,10 +1853,10 @@ sbe_xip_get_halt(void* io_image,
s.iv_address = i_poreAddress;
do {
- rc = quickCheck(io_image, 0);
+ rc = xipQuickCheck(io_image, 0);
if (rc) break;
- rc = sbe_xip_map_halt(io_image, getHaltMap, &s);
+ rc = sbe_xip_map_halt(io_image, xipGetHaltMap, &s);
if (rc == 0) {
rc = TRACE_ERRORX(SBE_XIP_ITEM_NOT_FOUND,
"sbe_xip_get_halt: No HALT code is associated "
@@ -1623,10 +1884,10 @@ sbe_xip_get_scalar(void *i_image, const char* i_id, uint64_t* o_data)
*o_data = *((uint8_t*)(item.iv_imageData));
break;
case SBE_XIP_UINT32:
- *o_data = revle32(*((uint32_t*)(item.iv_imageData)));
+ *o_data = xipRevLe32(*((uint32_t*)(item.iv_imageData)));
break;
case SBE_XIP_UINT64:
- *o_data = revle64(*((uint64_t*)(item.iv_imageData)));
+ *o_data = xipRevLe64(*((uint64_t*)(item.iv_imageData)));
break;
case SBE_XIP_ADDRESS:
*o_data = item.iv_address;
@@ -1663,10 +1924,10 @@ sbe_xip_get_element(void *i_image,
*o_data = ((uint8_t*)(item.iv_imageData))[i_index];
break;
case SBE_XIP_UINT32:
- *o_data = revle32(((uint32_t*)(item.iv_imageData))[i_index]);
+ *o_data = xipRevLe32(((uint32_t*)(item.iv_imageData))[i_index]);
break;
case SBE_XIP_UINT64:
- *o_data = revle64(((uint64_t*)(item.iv_imageData))[i_index]);
+ *o_data = xipRevLe64(((uint64_t*)(item.iv_imageData))[i_index]);
break;
default:
rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
@@ -1708,10 +1969,10 @@ sbe_xip_read_uint64(const void *i_image,
int rc;
do {
- rc = quickCheck(i_image, 0);
+ rc = xipQuickCheck(i_image, 0);
if (rc) break;
- rc = validatePoreAddress(i_image, i_poreAddress, 8);
+ rc = xipValidatePoreAddress(i_image, i_poreAddress, 8);
if (rc) break;
if (i_poreAddress % 8) {
@@ -1720,7 +1981,7 @@ sbe_xip_read_uint64(const void *i_image,
}
*o_data =
- revle64(*((uint64_t*)pore2Host(i_image, i_poreAddress)));
+ xipRevLe64(*((uint64_t*)xipPore2Host(i_image, i_poreAddress)));
} while(0);
@@ -1741,10 +2002,10 @@ sbe_xip_set_scalar(void* io_image, const char* i_id, const uint64_t i_data)
*((uint8_t*)(item.iv_imageData)) = (uint8_t)i_data;
break;
case SBE_XIP_UINT32:
- *((uint32_t*)(item.iv_imageData)) = revle32((uint32_t)i_data);
+ *((uint32_t*)(item.iv_imageData)) = xipRevLe32((uint32_t)i_data);
break;
case SBE_XIP_UINT64:
- *((uint64_t*)(item.iv_imageData)) = revle64((uint64_t)i_data);
+ *((uint64_t*)(item.iv_imageData)) = xipRevLe64((uint64_t)i_data);
break;
default:
rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
@@ -1779,11 +2040,11 @@ sbe_xip_set_element(void *i_image,
break;
case SBE_XIP_UINT32:
((uint32_t*)(item.iv_imageData))[i_index] =
- revle32((uint32_t)i_data);
+ xipRevLe32((uint32_t)i_data);
break;
case SBE_XIP_UINT64:
((uint64_t*)(item.iv_imageData))[i_index] =
- revle64((uint64_t)i_data);
+ xipRevLe64((uint64_t)i_data);
break;
default:
rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
@@ -1832,10 +2093,10 @@ sbe_xip_write_uint64(void *io_image,
int rc;
do {
- rc = quickCheck(io_image, 0);
+ rc = xipQuickCheck(io_image, 0);
if (rc) break;
- rc = validatePoreAddress(io_image, i_poreAddress, 8);
+ rc = xipValidatePoreAddress(io_image, i_poreAddress, 8);
if (rc) break;
if (i_poreAddress % 8) {
@@ -1843,7 +2104,8 @@ sbe_xip_write_uint64(void *io_image,
break;
}
- *((uint64_t*)pore2Host(io_image, i_poreAddress)) = revle64(i_data);
+ *((uint64_t*)xipPore2Host(io_image, i_poreAddress)) =
+ xipRevLe64(i_data);
} while(0);
@@ -1858,7 +2120,7 @@ sbe_xip_delete_section(void* io_image, const int i_sectionId)
SbeXipSection section;
do {
- rc = quickCheck(io_image, 1);
+ rc = xipQuickCheck(io_image, 1);
if (rc) break;
rc = sbe_xip_get_section(io_image, i_sectionId, &section);
@@ -1871,7 +2133,7 @@ sbe_xip_delete_section(void* io_image, const int i_sectionId)
if (section.iv_size == 0) break;
- rc = finalSection(io_image, &final);
+ rc = xipFinalSection(io_image, &final);
if (rc) break;
if (final != i_sectionId) {
@@ -1881,11 +2143,11 @@ sbe_xip_delete_section(void* io_image, const int i_sectionId)
break;
}
- setImageSize(io_image, section.iv_offset);
- setSectionOffset(io_image, i_sectionId, 0);
- setSectionSize(io_image, i_sectionId, 0);
+ xipSetImageSize(io_image, section.iv_offset);
+ xipSetSectionOffset(io_image, i_sectionId, 0);
+ xipSetSectionSize(io_image, i_sectionId, 0);
- finalAlignment(io_image);
+ xipFinalAlignment(io_image);
} while (0);
@@ -1893,6 +2155,11 @@ sbe_xip_delete_section(void* io_image, const int i_sectionId)
}
+#ifndef PPC_HYP
+
+// This API is not needed by PHYP procedures, and is elided since PHYP does
+// not support malloc().
+
int
sbe_xip_duplicate_section(const void* i_image,
const int i_sectionId,
@@ -1905,7 +2172,7 @@ sbe_xip_duplicate_section(const void* i_image,
*o_duplicate = 0;
do {
- rc = quickCheck(i_image, 0);
+ rc = xipQuickCheck(i_image, 0);
if (rc) break;
rc = sbe_xip_get_section(i_image, i_sectionId, &section);
@@ -1927,7 +2194,7 @@ sbe_xip_duplicate_section(const void* i_image,
}
memcpy(*o_duplicate,
- hostAddressFromOffset(i_image, section.iv_offset),
+ xipHostAddressFromOffset(i_image, section.iv_offset),
section.iv_size);
@@ -1942,6 +2209,8 @@ sbe_xip_duplicate_section(const void* i_image,
return rc;
}
+#endif // PPC_HYP
+
int
sbe_xip_append(void* io_image,
@@ -1957,7 +2226,7 @@ sbe_xip_append(void* io_image,
uint32_t pad;
do {
- rc = quickCheck(io_image, 1);
+ rc = xipQuickCheck(io_image, 1);
if (rc) break;
rc = sbe_xip_get_section(io_image, i_sectionId, &section);
@@ -1971,16 +2240,17 @@ sbe_xip_append(void* io_image,
// the image to the specified section alignment. Note that the
// size of the previously final section does not change.
- rc = padImage(io_image, i_allocation, section.iv_alignment, &pad);
+ rc = xipPadImage(io_image, i_allocation, section.iv_alignment,
+ &pad);
if (rc) break;
- section.iv_offset = imageSize(io_image);
+ section.iv_offset = xipImageSize(io_image);
} else {
// Otherwise, the section must be the final section in order to
// continue. Remove any padding from the image.
- rc = finalSection(io_image, &final);
+ rc = xipFinalSection(io_image, &final);
if (rc) break;
if (final != i_sectionId) {
@@ -1989,7 +2259,7 @@ sbe_xip_append(void* io_image,
"%d\n", i_sectionId);
break;
}
- setImageSize(io_image, section.iv_offset + section.iv_size);
+ xipSetImageSize(io_image, section.iv_offset + section.iv_size);
}
@@ -1997,7 +2267,7 @@ sbe_xip_append(void* io_image,
// parameter o_sectionOffset and copy the new data into the image (or
// simply clear the space).
- if ((imageSize(io_image) + i_size) > i_allocation) {
+ if ((xipImageSize(io_image) + i_size) > i_allocation) {
rc = TRACE_ERROR(SBE_XIP_WOULD_OVERFLOW);
break;
}
@@ -2005,7 +2275,8 @@ sbe_xip_append(void* io_image,
*o_sectionOffset = section.iv_size;
}
- hostAddress = hostAddressFromOffset(io_image, imageSize(io_image));
+ hostAddress =
+ xipHostAddressFromOffset(io_image, xipImageSize(io_image));
if (i_data == 0) {
memset(hostAddress, 0, i_size);
} else {
@@ -2015,11 +2286,11 @@ sbe_xip_append(void* io_image,
// Update the image size and section table.
- setImageSize(io_image, imageSize(io_image) + i_size);
- finalAlignment(io_image);
+ xipSetImageSize(io_image, xipImageSize(io_image) + i_size);
+ xipFinalAlignment(io_image);
section.iv_size += i_size;
- rc = putSection(io_image, i_sectionId, &section);
+ rc = xipPutSection(io_image, i_sectionId, &section);
if (rc) break;
@@ -2045,7 +2316,7 @@ sbe_xip_section2pore(const void* i_image,
SbeXipSection section;
do {
- rc = quickCheck(i_image, 0);
+ rc = xipQuickCheck(i_image, 0);
if (rc) break;
rc = sbe_xip_get_section(i_image, i_sectionId, &section);
@@ -2061,7 +2332,7 @@ sbe_xip_section2pore(const void* i_image,
break;
}
- *o_poreAddress = linkAddress(i_image) + section.iv_offset + i_offset;
+ *o_poreAddress = xipLinkAddress(i_image) + section.iv_offset + i_offset;
if (*o_poreAddress % 4) {
rc = TRACE_ERROR(SBE_XIP_ALIGNMENT_ERROR);
@@ -2083,10 +2354,10 @@ sbe_xip_pore2section(const void* i_image,
int rc;
do {
- rc = quickCheck(i_image, 0);
+ rc = xipQuickCheck(i_image, 0);
if (rc) break;
- rc = pore2Section(i_image, i_poreAddress, i_section, i_offset);
+ rc = xipPore2Section(i_image, i_poreAddress, i_section, i_offset);
} while(0);
@@ -2102,18 +2373,19 @@ sbe_xip_pore2host(const void* i_image,
int rc;
do {
- rc = quickCheck(i_image, 0);
+ rc = xipQuickCheck(i_image, 0);
if (rc) break;
- if ((i_poreAddress < linkAddress(i_image)) ||
- (i_poreAddress > (linkAddress(i_image) + imageSize(i_image)))) {
+ if ((i_poreAddress < xipLinkAddress(i_image)) ||
+ (i_poreAddress >
+ (xipLinkAddress(i_image) + xipImageSize(i_image)))) {
rc = TRACE_ERROR(SBE_XIP_INVALID_ARGUMENT);
break;
}
*o_hostAddress =
- hostAddressFromOffset(i_image,
- i_poreAddress - linkAddress(i_image));
+ xipHostAddressFromOffset(i_image,
+ i_poreAddress - xipLinkAddress(i_image));
} while(0);
return rc;
@@ -2128,16 +2400,17 @@ sbe_xip_host2pore(const void* i_image,
int rc;
do {
- rc = quickCheck(i_image, 0);
+ rc = xipQuickCheck(i_image, 0);
if (rc) break;
if ((i_hostAddress < i_image) ||
- (i_hostAddress > hostAddressFromOffset(i_image, imageSize(i_image)))) {
+ (i_hostAddress >
+ xipHostAddressFromOffset(i_image, xipImageSize(i_image)))) {
rc = TRACE_ERROR(SBE_XIP_INVALID_ARGUMENT);
break;
}
- *o_poreAddress = linkAddress(i_image) +
+ *o_poreAddress = xipLinkAddress(i_image) +
((unsigned long)i_hostAddress - (unsigned long)i_image);
if (*o_poreAddress % 4) {
rc = TRACE_ERROR(SBE_XIP_ALIGNMENT_ERROR);
@@ -2161,9 +2434,9 @@ sbe_xip_translate_header(SbeXipHeader* o_dest, const SbeXipHeader* i_src)
#error This code assumes the SBE-XIP header version 8 layout
#endif
- o_dest->iv_magic = revle64(i_src->iv_magic);
- o_dest->iv_entryOffset = revle64(i_src->iv_entryOffset);
- o_dest->iv_linkAddress = revle64(i_src->iv_linkAddress);
+ o_dest->iv_magic = xipRevLe64(i_src->iv_magic);
+ o_dest->iv_entryOffset = xipRevLe64(i_src->iv_entryOffset);
+ o_dest->iv_linkAddress = xipRevLe64(i_src->iv_linkAddress);
for (i = 0; i < 5; i++) {
o_dest->iv_reserved64[i] = 0;
@@ -2173,12 +2446,12 @@ sbe_xip_translate_header(SbeXipHeader* o_dest, const SbeXipHeader* i_src)
srcSection = i_src->iv_section;
i < SBE_XIP_SECTIONS;
i++, destSection++, srcSection++) {
- translateSection(destSection, srcSection);
+ xipTranslateSection(destSection, srcSection);
}
- o_dest->iv_imageSize = revle32(i_src->iv_imageSize);
- o_dest->iv_buildDate = revle32(i_src->iv_buildDate);
- o_dest->iv_buildTime = revle32(i_src->iv_buildTime);
+ o_dest->iv_imageSize = xipRevLe32(i_src->iv_imageSize);
+ o_dest->iv_buildDate = xipRevLe32(i_src->iv_buildDate);
+ o_dest->iv_buildTime = xipRevLe32(i_src->iv_buildTime);
for (i = 0; i < 5; i++) {
o_dest->iv_reserved32[i] = 0;
@@ -2220,14 +2493,14 @@ sbe_xip_map_toc(void* io_image,
size_t entries;
do {
- rc = quickCheck(io_image, 0);
+ rc = xipQuickCheck(io_image, 0);
if (rc) break;
- rc = getToc(io_image, &imageToc, &entries, 0, 0);
+ rc = xipGetToc(io_image, &imageToc, &entries, 0, 0);
if (rc) break;
for (; entries--; imageToc++) {
- rc = decodeToc(io_image, imageToc, &item);
+ rc = xipDecodeToc(io_image, imageToc, &item);
if (rc) break;
rc = i_fn(io_image, &item, io_arg);
if (rc) break;
@@ -2236,10 +2509,3 @@ sbe_xip_map_toc(void* io_image,
return rc;
}
-
-
-
-
-
-
-
diff --git a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.h b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.h
index 3bf222ade..726113e73 100644
--- a/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.h
+++ b/src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.h
@@ -23,7 +23,7 @@
#ifndef __SBE_XIP_IMAGE_H
#define __SBE_XIP_IMAGE_H
-// $Id: sbe_xip_image.h,v 1.20 2013/02/06 04:48:45 bcbrock Exp $
+// $Id: sbe_xip_image.h,v 1.23 2013/03/20 21:41:53 cmolsen Exp $
// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/sbe/sbe_xip_image.h,v $
//-----------------------------------------------------------------------------
// *! (C) Copyright International Business Machines Corp. 2011
@@ -87,7 +87,7 @@
#define SBE_XIP_SECTION_STRINGS 8
#define SBE_XIP_SECTION_HALT 9
#define SBE_XIP_SECTION_PIBMEM0 10
-#define SBE_XIP_SECTION_PIBMEM1 11
+#define SBE_XIP_SECTION_DCRINGS 11
#define SBE_XIP_SECTION_RINGS 12
#define SBE_XIP_SECTION_SLW 13
#define SBE_XIP_SECTION_FIT 14
@@ -97,6 +97,23 @@
/// @}
+
+/// \defgroup sbe_xip_validate() ignore masks.
+///
+/// These defines, when matched in sbe_xip_validate(), cause the validation
+/// to skip the check of the corresponding property. The purpose is to more
+/// effectively debug images that may be damaged and which have excess info
+/// before or after the image. The latter will be the case when dumping the
+/// image as a memory block without knowing where the image starts and ends.
+///
+/// @{
+
+#define SBE_XIP_IGNORE_FILE_SIZE (uint32_t)0x00000001
+#define SBE_XIP_IGNORE_ALL (uint32_t)0x80000000
+
+/// @}
+
+
#ifndef __ASSEMBLER__
/// Applications can expand this macro to create an array of section names.
@@ -113,7 +130,7 @@
".strings", \
".halt", \
".pibmem0", \
- ".pibmem1", \
+ ".dcrings", \
".rings", \
".slw", \
".fit", \
@@ -599,6 +616,9 @@ typedef struct {
///
/// \param[in] i_size The putative size of the image
///
+/// \param[in] i_maskIgnores Array of ignore bits representing which properties
+/// should not be checked for in sbe_xip_validate2().
+///
/// This API should be called first by all applications that manipulate
/// SBE-XIP images in host memory. The magic number is validated, and
/// the image is checked for consistency of the section table and table of
@@ -611,6 +631,9 @@ typedef struct {
int
sbe_xip_validate(void* i_image, const uint32_t i_size);
+int
+sbe_xip_validate2(void* i_image, const uint32_t i_size, const uint32_t i_maskIgnores);
+
/// Normalize the SBE-XIP image
///
OpenPOWER on IntegriCloud