summaryrefslogtreecommitdiffstats
path: root/message_handler.cpp
diff options
context:
space:
mode:
authorVernon Mauery <vernon.mauery@linux.intel.com>2018-11-07 09:55:53 -0800
committerVernon Mauery <vernon.mauery@linux.intel.com>2019-02-25 14:40:59 -0800
commit8d6f200c5fdb820eda8e0ed721e465d544209b23 (patch)
tree377753653691c994f193dccb364e6684b5cf929f /message_handler.cpp
parent7b98c0725eec2a09bf65ee1e78839fc2c4a3da03 (diff)
downloadphosphor-net-ipmid-8d6f200c5fdb820eda8e0ed721e465d544209b23.tar.gz
phosphor-net-ipmid-8d6f200c5fdb820eda8e0ed721e465d544209b23.zip
netipmid: make Handler asynchronous
The dbus call to the main ipmid queue was, up to this point, synchronous, which means it blocks all other networking and execution until the main queue returns (which may be on the order of seconds for some commands). This is an unacceptable delay, especially when this queue is responsible for timely updates of SOL traffic.

This change turns the call into an asynchronous one by leveraging shared pointers and an optional action on destruction. As long as a reference to the Handler object exists, it will live on, waiting to send its response. Once the async dbus call has returned and set the reply in the Handler, it will drop the reference to the shared pointer, and the destructor will send out the response over the channel.

Tested-by: Run multiple sessions at the same time while monitoring dbus traffic. Observe that the requests and responses may be interleaved instead of serial.

Change-Id: I16fca8dc3d13624eeb1592ec36d1a9af6575f115
Signed-off-by: Vernon Mauery <vernon.mauery@linux.intel.com>
Diffstat (limited to 'message_handler.cpp')
-rw-r--r--message_handler.cpp70
1 file changed, 51 insertions(+), 19 deletions(-)
diff --git a/message_handler.cpp b/message_handler.cpp
index 58630d9..a45c13c 100644
--- a/message_handler.cpp
+++ b/message_handler.cpp
@@ -17,8 +17,9 @@ using namespace phosphor::logging;
namespace message
{
+using namespace phosphor::logging;
-std::shared_ptr<Message> Handler::receive()
+bool Handler::receive()
{
std::vector<uint8_t> packet;
auto readStatus = 0;
@@ -30,51 +31,82 @@ std::shared_ptr<Message> Handler::receive()
if (readStatus < 0)
{
log<level::ERR>("Error in Read", entry("STATUS=%x", readStatus));
- return nullptr;
+ return false;
}
// Unflatten the packet
- std::shared_ptr<Message> message;
- std::tie(message, sessionHeader) = parser::unflatten(packet);
+ std::tie(inMessage, sessionHeader) = parser::unflatten(packet);
auto session = std::get<session::Manager&>(singletonPool)
- .getSession(message->bmcSessionID);
+ .getSession(inMessage->bmcSessionID);
- sessionID = message->bmcSessionID;
- message->rcSessionID = session->getRCSessionID();
+ sessionID = inMessage->bmcSessionID;
+ inMessage->rcSessionID = session->getRCSessionID();
session->updateLastTransactionTime();
- return message;
+ return true;
}
-std::shared_ptr<Message>
- Handler::executeCommand(std::shared_ptr<Message> inMessage)
+Handler::~Handler()
+{
+ if (outPayload)
+ {
+ std::shared_ptr<Message> outMessage =
+ inMessage->createResponse(*outPayload);
+ if (!outMessage)
+ {
+ return;
+ }
+ try
+ {
+ send(outMessage);
+ }
+ catch (const std::exception& e)
+ {
+ // send failed, most likely due to a session closure
+ log<level::INFO>("Async RMCP+ reply failed",
+ entry("EXCEPTION=%s", e.what()));
+ }
+ }
+}
+
+void Handler::processIncoming()
+{
+ // Read the incoming IPMI packet
+ if (!receive())
+ {
+ return;
+ }
+
+ // Execute the Command, possibly asynchronously
+ executeCommand();
+
+ // send happens during the destructor if a payload was set
+}
+
+void Handler::executeCommand()
{
// Get the CommandID to map into the command table
auto command = inMessage->getCommand();
- std::vector<uint8_t> output{};
-
if (inMessage->payloadType == PayloadType::IPMI)
{
if (inMessage->payload.size() <
(sizeof(LAN::header::Request) + sizeof(LAN::trailer::Request)))
{
- return nullptr;
+ return;
}
auto start = inMessage->payload.begin() + sizeof(LAN::header::Request);
auto end = inMessage->payload.end() - sizeof(LAN::trailer::Request);
std::vector<uint8_t> inPayload(start, end);
-
- output = std::get<command::Table&>(singletonPool)
- .executeCommand(command, inPayload, *this);
+ std::get<command::Table&>(singletonPool)
+ .executeCommand(command, inPayload, shared_from_this());
}
else
{
- output = std::get<command::Table&>(singletonPool)
- .executeCommand(command, inMessage->payload, *this);
+ std::get<command::Table&>(singletonPool)
+ .executeCommand(command, inMessage->payload, shared_from_this());
}
- return inMessage->createResponse(output);
}
void Handler::send(std::shared_ptr<Message> outMessage)
OpenPOWER on IntegriCloud