From 8d6f200c5fdb820eda8e0ed721e465d544209b23 Mon Sep 17 00:00:00 2001
From: Vernon Mauery <vernon.mauery@linux.intel.com>
Date: Wed, 7 Nov 2018 09:55:53 -0800
Subject: netipmid: make Handler asynchronous

The dbus call to the main ipmid queue was up to this point synchronous,
which means it blocks all other networking and execution until the main
queue returns (which may be on the order of seconds for some commands).
This is an unacceptable delay, especially when this queue is responsible
for timely updates of SOL traffic.

This turns the call into an asynchronous one by leveraging shared
pointers and an optional action on destruction. So as long as a
reference to the Handler object exists, it will live on, waiting to send
its response. Once the async dbus call has returned and set the reply in
the Handler, it will drop the reference to the shared pointer and the
destructor will send out the response over the channel.

Tested-by: Run multiple sessions at the same time while monitoring dbus
traffic. See that the requests and responses may be interleaved instead
of serial.
Change-Id: I16fca8dc3d13624eeb1592ec36d1a9af6575f115
Signed-off-by: Vernon Mauery <vernon.mauery@linux.intel.com>
---
 message_handler.cpp | 70 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 51 insertions(+), 19 deletions(-)

(limited to 'message_handler.cpp')

diff --git a/message_handler.cpp b/message_handler.cpp
index 58630d9..a45c13c 100644
--- a/message_handler.cpp
+++ b/message_handler.cpp
@@ -17,8 +17,9 @@ using namespace phosphor::logging;
 
 namespace message
 {
+using namespace phosphor::logging;
 
-std::shared_ptr<Message> Handler::receive()
+bool Handler::receive()
 {
     std::vector<uint8_t> packet;
     auto readStatus = 0;
@@ -30,51 +31,82 @@ std::shared_ptr<Message> Handler::receive()
 
     if (readStatus < 0)
     {
         log<level::ERR>("Error in Read", entry("STATUS=%x", readStatus));
-        return nullptr;
+        return false;
     }
 
     // Unflatten the packet
-    std::shared_ptr<Message> message;
-    std::tie(message, sessionHeader) = parser::unflatten(packet);
+    std::tie(inMessage, sessionHeader) = parser::unflatten(packet);
 
     auto session = std::get<session::Manager&>(singletonPool)
-                       .getSession(message->bmcSessionID);
+                       .getSession(inMessage->bmcSessionID);
 
-    sessionID = message->bmcSessionID;
-    message->rcSessionID = session->getRCSessionID();
+    sessionID = inMessage->bmcSessionID;
+    inMessage->rcSessionID = session->getRCSessionID();
     session->updateLastTransactionTime();
 
-    return message;
+    return true;
 }
 
-std::shared_ptr<Message>
-    Handler::executeCommand(std::shared_ptr<Message> inMessage)
+Handler::~Handler()
+{
+    if (outPayload)
+    {
+        std::shared_ptr<Message> outMessage =
+            inMessage->createResponse(*outPayload);
+        if (!outMessage)
+        {
+            return;
+        }
+        try
+        {
+            send(outMessage);
+        }
+        catch (const std::exception& e)
+        {
+            // send failed, most likely due to a session closure
+            log<level::INFO>("Async RMCP+ reply failed",
+                             entry("EXCEPTION=%s", e.what()));
+        }
+    }
+}
+
+void Handler::processIncoming()
+{
+    // Read the incoming IPMI packet
+    if (!receive())
+    {
+        return;
+    }
+
+    // Execute the Command, possibly asynchronously
+    executeCommand();
+
+    // send happens during the destructor if a payload was set
+}
+
+void Handler::executeCommand()
 {
     // Get the CommandID to map into the command table
     auto command = inMessage->getCommand();
-    std::vector<uint8_t> output{};
-
     if (inMessage->payloadType == PayloadType::IPMI)
     {
         if (inMessage->payload.size() <
             (sizeof(LAN::header::Request) + sizeof(LAN::trailer::Request)))
         {
-            return nullptr;
+            return;
         }
 
         auto start = inMessage->payload.begin() + sizeof(LAN::header::Request);
         auto end = inMessage->payload.end() - sizeof(LAN::trailer::Request);
         std::vector<uint8_t> inPayload(start, end);
-
-        output = std::get<command::Table&>(singletonPool)
-                     .executeCommand(command, inPayload, *this);
+        std::get<command::Table&>(singletonPool)
+            .executeCommand(command, inPayload, shared_from_this());
     }
     else
     {
-        output = std::get<command::Table&>(singletonPool)
-                     .executeCommand(command, inMessage->payload, *this);
+        std::get<command::Table&>(singletonPool)
+            .executeCommand(command, inMessage->payload, shared_from_this());
     }
-
-    return inMessage->createResponse(output);
 }
 
 void Handler::send(std::shared_ptr<Message> outMessage)
--
cgit v1.2.1