author    Brian Bakke <bbakke@us.ibm.com>          2017-08-16 15:56:02 -0500
committer Daniel M. Crowell <dcrowell@us.ibm.com>  2017-09-19 16:59:00 -0400
commit    046a2655125fa0b4efdafa460d7f60ac0719d669 (patch)
tree      f9efe4763e8d94c1270e5175b05d7ff86152509a /src/usr/mbox
parent    88342b08a1281b0244ba6cff72d64679319589a4 (diff)
Send "recollect dma buffers" message in TI path
Change-Id: Ice352f333cc3cfca7393b335414958612834b3c8
RTC: 170428
CQ: SW359697
Reviewed-on: http://ralgit01.raleigh.ibm.com/gerrit1/44706
Reviewed-by: Martin Gloff <mgloff@us.ibm.com>
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Reviewed-by: Christian R. Geddes <crgeddes@us.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com>
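The change adds a public entry point, MBOX::reclaimDmaBfrsFromFsp(), that the terminate-immediate (TI) path can call to pull outstanding DMA buffers back from the FSP before shutdown. A minimal sketch of how a caller might invoke it follows; the wrapper function and the errlCommit()/MBOX_COMP_ID error handling are illustrative assumptions, only MBOX::reclaimDmaBfrsFromFsp() itself comes from this commit:

    #include <mbox/mboxif.H>      // assumed location of the MBOX interface
    #include <errl/errlmanager.H> // assumed, for errlCommit()

    // Hypothetical TI-path hook: reclaim DMA buffers before halting.
    void preShutdownReclaim( void )
    {
        errlHndl_t l_err = MBOX::reclaimDmaBfrsFromFsp();

        if( l_err )
        {
            // Commit the log and continue anyway; a failed reclaim
            // must not block the TI path from completing.
            errlCommit( l_err, MBOX_COMP_ID );
        }
    }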
Diffstat (limited to 'src/usr/mbox')
-rw-r--r--  src/usr/mbox/mailboxsp.C        | 207
-rw-r--r--  src/usr/mbox/mailboxsp.H        |  43
-rw-r--r--  src/usr/mbox/mbox_dma_buffer.C  |   4
-rw-r--r--  src/usr/mbox/mbox_dma_buffer.H  |  30
4 files changed, 248 insertions(+), 36 deletions(-)
diff --git a/src/usr/mbox/mailboxsp.C b/src/usr/mbox/mailboxsp.C
index 4a968ed27..355d5a4e3 100644
--- a/src/usr/mbox/mailboxsp.C
+++ b/src/usr/mbox/mailboxsp.C
@@ -49,6 +49,7 @@
#include <kernel/console.H>
#include <arch/pirformat.H>
#include <sbeio/sbeioif.H>
+#include <sys/time.h>
// Local functions
namespace MBOX
@@ -90,7 +91,9 @@ MailboxSp::MailboxSp()
iv_allow_blk_resp(false),
iv_sum_alloc(0),
iv_pend_alloc(),
- iv_allocAddrs()
+ iv_allocAddrs(),
+ iv_reclaim_sent_cnt(0),
+ iv_reclaim_rsp_cnt(0)
{
// mailbox target
TARGETING::targetService().masterProcChipTargetHandle(iv_trgt);
@@ -280,27 +283,55 @@ void MailboxSp::msgHandler()
crit_assert(0);
}
+ // note: an outstanding request-dma-bfrs msg
+ // will cause quiesced() to return false
if(iv_shutdown_msg && quiesced())
{
- // If all DMA buffers still not owned
- // try once to get them all back.
- if(!iv_dmaBuffer.ownsAllBlocks() &&
- !iv_dmaBuffer.shutdownDmaRequestSent())
+ if // all DMA buffers have been reclaimed
+ ( iv_dmaBuffer.ownsAllBlocks() )
{
- iv_dmaBuffer.setShutdownDmaRequestSent(true);
- mbox_msg_t dma_request_msg;
- dma_request_msg.msg_queue_id = FSP_MAILBOX_MSGQ;
- dma_request_msg.msg_payload.type =
- MSG_REQUEST_DMA_BUFFERS;
- dma_request_msg.msg_payload.__reserved__async = 1;
-
- send_msg(&dma_request_msg);
+ // continue with shutdown
+ TRACFCOMP(g_trac_mbox,
+ INFO_MRK"MBOXSP DMA bfrs reclaimed "
+ "on shutdown");
+
+ handleShutdown();
+ }
+
+ else if // a "reclaim bfr" msg is outstanding
+ ( isDmaReqBfrMsgOutstanding() )
+ {
+ // (need to wait for the msg(s) to complete
+ // before sending another msg)
+ TRACFCOMP(g_trac_mbox,
+ INFO_MRK
+ "MailboxSp::msgHandler - "
+ "Wait for Reclaim Msg Completion");
}
+
+ else if // more "reclaim bfr" msgs can be sent
+ ( iv_dmaBuffer.maxShutdownDmaRequestSent() == false )
+ {
+ TRACFCOMP(g_trac_mbox,
+ INFO_MRK
+ "MailboxSp::msgHandler - "
+ "Send Reclaim Msg to FSP");
+
+ // send a "reclaim bfr" msg
+ iv_dmaBuffer.incrementShutdownDmaRequestSentCnt();
+ sendReclaimDmaBfrsMsg();
+ }
+
else
{
+ // continue with shutdown
+ TRACFCOMP(g_trac_mbox,
+ INFO_MRK"MBOXSP DMA bfrs not reclaimed "
+ "on shutdown");
+
handleShutdown();
}
- }
+ } // end shutdown msg & quiesced
if(iv_suspended && quiesced())
{
@@ -364,15 +395,11 @@ void MailboxSp::msgHandler()
// is not pending, but may not get them all back
// at this time.
//
- if(!iv_dmaBuffer.ownsAllBlocks() && !iv_dma_pend)
+ if( !iv_dmaBuffer.ownsAllBlocks() &&
+ !iv_dma_pend &&
+ !isDmaReqBfrMsgOutstanding() )
{
- mbox_msg_t dma_request_msg;
- dma_request_msg.msg_queue_id = FSP_MAILBOX_MSGQ;
- dma_request_msg.msg_payload.type =
- MSG_REQUEST_DMA_BUFFERS;
- dma_request_msg.msg_payload.__reserved__async = 1;
-
- send_msg(&dma_request_msg);
+ sendReclaimDmaBfrsMsg();
}
if(quiesced()) //already in shutdown state
@@ -1146,6 +1173,9 @@ void MailboxSp::handle_hbmbox_resp(mbox_msg_t & i_mbox_msg)
iv_dmaBuffer.addBuffers
(i_mbox_msg.msg_payload.data[0]);
+ // track response received
+ iv_reclaim_rsp_cnt++;
+
iv_dma_pend = false;
send_msg(); // send next message, if there is one
@@ -1241,6 +1271,121 @@ errlHndl_t MailboxSp::send(queue_id_t i_q_id,
return err;
}
+/**
+ * Reclaim any DMA buffers owned by the FSP
+ */
+errlHndl_t MailboxSp::reclaimDmaBfrsFromFsp( void )
+{
+ errlHndl_t err = NULL;
+
+ // locate the FSP mailbox
+ MailboxSp & fspMbox = Singleton<MailboxSp>::instance();
+
+ // reclaim the dma bfrs
+ err = fspMbox._reclaimDmaBfrsFromFsp();
+
+ return( err );
+}
+
+errlHndl_t MailboxSp::_reclaimDmaBfrsFromFsp( void )
+{
+ errlHndl_t err = NULL;
+ int msgSentCnt = 0;
+ int maxDmaBfrs = iv_dmaBuffer.maxDmaBfrs();
+
+ TRACFBIN(g_trac_mbox,
+ INFO_MRK
+ "MailboxSp::_reclaimDmaBfrsFromFsp - Start."
+ " DmaBuffer = ",
+ &iv_dmaBuffer,
+ sizeof(iv_dmaBuffer) );
+
+ while // bfrs still need to be reclaimed
+ ( iv_dmaBuffer.ownsAllBlocks() == false )
+ {
+ if // request dma bfrs msg is outstanding
+ ( isDmaReqBfrMsgOutstanding() == true )
+ {
+ // (wait for msg to complete)
+ nanosleep( 0, 1000000 ); // 1ms to avoid tight busy loop
+ task_yield();
+ }
+
+ else if // can send another request dma bfrs msg
+ (msgSentCnt < maxDmaBfrs)
+ {
+ // send the message
+ msgSentCnt++;
+ sendReclaimDmaBfrsMsg();
+ }
+
+ else
+ {
+ // (sent max number of reclaims and bfrs still not free)
+ // (something really bad is happening; exit so we don't hang)
+
+ // create a snapshot of DMA buffer control object for tracing
+ char dmyArray[sizeof(iv_dmaBuffer)];
+ memcpy( &dmyArray[0],
+ (void *)&iv_dmaBuffer,
+ sizeof(dmyArray) );
+
+ TRACFBIN(g_trac_mbox,
+ ERR_MRK
+ "MailboxSp::_reclaimDmaBfrsFromFsp -"
+ "Reclaim Did Not Complete. "
+ "DmaBuffer = ",
+ &dmyArray[0],
+ sizeof(dmyArray) );
+
+ break;
+ }
+ } // end wait for bfrs to be reclaimed
+
+ return( err );
+}
+
+
+void MailboxSp::sendReclaimDmaBfrsMsg( void )
+{
+ // allocate local msg bfr on the stack
+ mbox_msg_t local_msg_bfr;
+
+ sendReclaimDmaBfrsMsg( local_msg_bfr );
+
+ return;
+}
+
+
+void MailboxSp::sendReclaimDmaBfrsMsg( mbox_msg_t & i_mbox_msg )
+{
+ // isolate all occurrences of this message to this routine
+ // so the total number of outstanding Request DMA Bfr
+ // messages can be tracked in one place. This allows
+ // a mechanism to determine whether any of these messages
+ // are either on the message Q or in flight on HW to the FSP.
+ // iv_dma_pend only tracks a msg from load on HW to response
+ // received; it does not consider the Queue.
+ TRACFCOMP(g_trac_mbox,
+ INFO_MRK
+ "MailboxSp::sendReclaimDmaBfrsMsg - "
+ "Send Reclaim Msg to FSP");
+
+ // send a request dma bfrs msg to reclaim from fsp
+ new (&i_mbox_msg) mbox_msg_t();
+
+ i_mbox_msg.msg_queue_id = FSP_MAILBOX_MSGQ;
+ i_mbox_msg.msg_payload.type = MSG_REQUEST_DMA_BUFFERS;
+ i_mbox_msg.msg_payload.__reserved__async = 1;
+
+ // track the msg until completion
+ iv_reclaim_sent_cnt++;
+
+ send_msg(&i_mbox_msg);
+
+ return;
+}
+
errlHndl_t MailboxSp::msgq_register(queue_id_t i_queue_id, msg_q_t i_msgQ)
{
@@ -2279,3 +2424,21 @@ void MBOX::deallocate(void * i_ptr)
}
}
+errlHndl_t MBOX::reclaimDmaBfrsFromFsp( void )
+{
+ errlHndl_t err = NULL;
+
+ msg_q_t mboxQ = msg_q_resolve(VFS_ROOT_MSG_MBOX);
+ if(mboxQ)
+ {
+ // reclaim the dma bfrs
+ err = MailboxSp::reclaimDmaBfrsFromFsp();
+ }
+ else
+ {
+ TRACFCOMP(g_trac_mbox, ERR_MRK"MBOX::reclaimDmaBfrsFromFsp - "
+ "Mailbox Service not available");
+ }
+
+ return( err );
+}
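The bookkeeping above boils down to a pair of monotonically increasing counters compared in isDmaReqBfrMsgOutstanding(): one bumped when a reclaim message is queued, one bumped when the FSP response arrives. A stripped-down sketch of the same pattern, with illustrative names that do not come from the source:

    #include <cstdint>

    // Minimal model of the sent/response counter pairing.
    class ReclaimTracker
    {
      public:
        void msgSent()     { iv_sent_cnt++; } // every reclaim msg queued or sent
        void rspReceived() { iv_rsp_cnt++;  } // every FSP response handled

        // A message is outstanding while sends outnumber responses.
        bool outstanding( void ) const
        {
            return iv_sent_cnt > iv_rsp_cnt;
        }

      private:
        int iv_sent_cnt = 0;
        int iv_rsp_cnt  = 0;
    };

Counting both sides, rather than keeping a single flag, is what lets the shutdown path see messages still sitting on the mailbox queue as well as ones already loaded on HW, which iv_dma_pend alone cannot.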
diff --git a/src/usr/mbox/mailboxsp.H b/src/usr/mbox/mailboxsp.H
index 4075aa30e..770218342 100644
--- a/src/usr/mbox/mailboxsp.H
+++ b/src/usr/mbox/mailboxsp.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
-/* Contributors Listed Below - COPYRIGHT 2012,2015 */
+/* Contributors Listed Below - COPYRIGHT 2012,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -91,6 +91,13 @@ namespace MBOX
msg_t * io_msg,
msgq_msg_t i_mbox_msg_type);
+ /**
+ * Reclaim any DMA buffers owned by the FSP
+ * @return errlHndl_t on error
+ */
+ static errlHndl_t reclaimDmaBfrsFromFsp( void );
+
+
protected:
/**
@@ -200,7 +207,7 @@ namespace MBOX
/**
* Handle interrupt from Intr presenter
- * @param[in] i_msg, The message
+ * @param[in] i_msg, The message
*/
errlHndl_t handleInterrupt();
@@ -285,6 +292,33 @@ namespace MBOX
*/
bool quiesced();
+ /**
+ * Reclaim any DMA buffers owned by the FSP
+ * @return errlHndl_t on error
+ */
+ errlHndl_t _reclaimDmaBfrsFromFsp( void );
+
+ /**
+ * Send a Reclaim DMA Buffers message to FSP
+ * @param[in] i_mbox_msg Message buffer to use
+ */
+ void sendReclaimDmaBfrsMsg(mbox_msg_t & i_mbox_msg);
+
+ /**
+ * Send a Reclaim DMA Buffers message to FSP
+ */
+ void sendReclaimDmaBfrsMsg( void );
+
+ /**
+ * Determine if a Reclaim Bfr message is outstanding
+ * @return [true - Msg active | false - no msg active]
+ */
+ ALWAYS_INLINE
+ bool isDmaReqBfrMsgOutstanding( void ) const
+ {
+ return ( iv_reclaim_sent_cnt > iv_reclaim_rsp_cnt );
+ }
+
enum
{
@@ -337,7 +371,8 @@ namespace MBOX
msg_t * iv_shutdown_msg;//!< Message to shutdown mbox
msg_t * iv_suspend_msg; //!< Message to suspend mbox
bool iv_rts; //!< ready to send flag
- bool iv_dma_pend; //!< Request pending for more DMA bufs
+ bool iv_dma_pend; //!< Request pending (on Hw)
+ //!< for more DMA bufs
bool iv_disabled; //!< Mailboxsp shut off (rejects new)
bool iv_suspended; //!< Mailbox is suspended (queues new)
bool iv_suspend_intr;//!< Disable HW interrupts on suspend
@@ -345,6 +380,8 @@ namespace MBOX
uint64_t iv_sum_alloc; //!< Total extra_data storage allocated
msg_list_t iv_pend_alloc; //!< Pending memory allocations
addr_list_t iv_allocAddrs; //!< memory addresses allocated by mbox
+ int iv_reclaim_sent_cnt;//!< num of reclaim bfr msgs sent
+ int iv_reclaim_rsp_cnt; //!< num of reclaim responses rcvd
};
};
diff --git a/src/usr/mbox/mbox_dma_buffer.C b/src/usr/mbox/mbox_dma_buffer.C
index 7a8cc4d2c..78a4d1f6a 100644
--- a/src/usr/mbox/mbox_dma_buffer.C
+++ b/src/usr/mbox/mbox_dma_buffer.C
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
-/* Contributors Listed Below - COPYRIGHT 2012,2015 */
+/* Contributors Listed Below - COPYRIGHT 2012,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -43,7 +43,7 @@ extern trace_desc_t * g_trac_mbox;
DmaBuffer::DmaBuffer() :
iv_head(NULL),
iv_dir(makeMask(VmmManager::MBOX_DMA_PAGES)),
- iv_dma_req_sent(false)
+ iv_dma_req_sent_cnt(0)
{
iv_head = malloc(VmmManager::MBOX_DMA_SIZE);
diff --git a/src/usr/mbox/mbox_dma_buffer.H b/src/usr/mbox/mbox_dma_buffer.H
index acb18df1f..7ae9c9219 100644
--- a/src/usr/mbox/mbox_dma_buffer.H
+++ b/src/usr/mbox/mbox_dma_buffer.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
-/* Contributors Listed Below - COPYRIGHT 2012,2015 */
+/* Contributors Listed Below - COPYRIGHT 2012,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -139,23 +139,34 @@ namespace MBOX
}
/**
- * Set the state of shutdown dma request sent
- * @param[in] The state to set [true|false]
+ * Increment the count of shutdown dma request sent
*/
ALWAYS_INLINE
- void setShutdownDmaRequestSent(bool i_state)
+ void incrementShutdownDmaRequestSentCnt( void )
{
- iv_dma_req_sent = i_state;
+ iv_dma_req_sent_cnt++;
}
/**
- * Query if the shutdown DMA request has been sent
+ * Query the max number of DMA bfrs
+ * @return [max number of DMA bfrs]
+ */
+ ALWAYS_INLINE
+ int maxDmaBfrs( void )
+ {
+ // max dma bfrs is bounded by the bit width of the mask variable
+ return( sizeof(iv_dir) * 8 );
+ }
+
+ /**
+ * Query if the max number of shutdown DMA request has been sent
* @return state [true|false]
*/
ALWAYS_INLINE
- bool shutdownDmaRequestSent()
+ bool maxShutdownDmaRequestSent( void )
{
- return iv_dma_req_sent;
+ // cap at the max number of bfrs since the min released per response is 1
+ return( iv_dma_req_sent_cnt >= maxDmaBfrs() );
}
private:
@@ -177,7 +188,8 @@ namespace MBOX
void * iv_head; //!< Start of DMA memory
uint64_t iv_phys_head; //!< Physical translation of iv_head
uint64_t iv_dir; //!< 1 bit per 1k buffer, 1 = available
- bool iv_dma_req_sent; //!< Request sent to retrieve all buffers
+ int iv_dma_req_sent_cnt; //!< number of Requests sent to
+ // retrieve all buffers
};
}; // namespace
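The retry cap in mbox_dma_buffer.H falls out of the directory representation: iv_dir keeps one bit per 1K buffer, so the widest possible buffer count is the bit width of that variable, and since each reclaim response returns at least one buffer, no more than that many requests can ever be useful. A self-contained sketch of the arithmetic, using an illustrative struct name:

    #include <cstdint>

    struct DmaDirModel
    {
        uint64_t iv_dir          = 0; // 1 bit per 1k buffer, 1 = available
        int      iv_req_sent_cnt = 0; // reclaim requests sent so far

        int maxDmaBfrs( void ) const
        {
            return sizeof(iv_dir) * 8; // 64 buffers at most
        }

        bool maxShutdownDmaRequestSent( void ) const
        {
            // Each response frees at least one buffer, so more than
            // maxDmaBfrs() requests can never be needed.
            return iv_req_sent_cnt >= maxDmaBfrs();
        }
    };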