author      Todd Fiala <todd.fiala@gmail.com>        2016-04-20 16:27:27 +0000
committer   Todd Fiala <todd.fiala@gmail.com>        2016-04-20 16:27:27 +0000
commit      49d3c15c3e8cc01e2a8af096eeacff746d92c644 (patch)
tree        1ace8d217bb1546aa11eccfd5062a738228628e0 /lldb/packages/Python/lldbsuite/test/dotest_channels.py
parent      e8fc69d1366f2c2fff66735c1af8e1f5b0feaa3f (diff)
download    bcm5719-llvm-49d3c15c3e8cc01e2a8af096eeacff746d92c644.tar.gz
            bcm5719-llvm-49d3c15c3e8cc01e2a8af096eeacff746d92c644.zip
test infra: move test event-related handling into its own package
This change moves all the test event handling and its related ResultsFormatter classes out of the packages/Python/lldbsuite/test dir into a packages/Python/lldbsuite/test_event package. Formatters are moved into a sub-package under that.

I am limiting the scope of this change to just the motion and a few minor issues caught by a static Python checker (e.g. removing unused import statements). This is a pre-step for adding package-level tests to the test event system. I also intend to simplify test event results formatter selection after I make sure this doesn't break anybody.

See: http://reviews.llvm.org/D19288

Reviewed by: Pavel Labath

llvm-svn: 266885
Diffstat (limited to 'lldb/packages/Python/lldbsuite/test/dotest_channels.py')
-rw-r--r--  lldb/packages/Python/lldbsuite/test/dotest_channels.py | 208
1 file changed, 0 insertions, 208 deletions
diff --git a/lldb/packages/Python/lldbsuite/test/dotest_channels.py b/lldb/packages/Python/lldbsuite/test/dotest_channels.py
deleted file mode 100644
index 72ff9bd85f1..00000000000
--- a/lldb/packages/Python/lldbsuite/test/dotest_channels.py
+++ /dev/null
@@ -1,208 +0,0 @@
-"""
- The LLVM Compiler Infrastructure
-
-This file is distributed under the University of Illinois Open Source
-License. See LICENSE.TXT for details.
-
-This module provides asyncore channels used within the LLDB test
-framework.
-"""
-
-from __future__ import print_function
-from __future__ import absolute_import
-
-
-# System modules
-import asyncore
-import socket
-
-# Third-party modules
-from six.moves import cPickle
-
-# LLDB modules
-
-
-class UnpicklingForwardingReaderChannel(asyncore.dispatcher):
- """Provides an unpickling, forwarding asyncore dispatch channel reader.
-
- Inferior dotest.py processes with side-channel-based test results will
- send test result event data in a pickled format, one event at a time.
- This class supports reconstructing the pickled data and forwarding it
- on to its final destination.
-
- The channel data is written in the form:
- {4-byte network-byte-order ("!I") payload length}{payload_bytes}
-
- The bulk of this class is devoted to reading and parsing out
- the payload bytes.
- """
- def __init__(self, file_object, async_map, forwarding_func):
- asyncore.dispatcher.__init__(self, sock=file_object, map=async_map)
-
- self.header_contents = b""
- self.packet_bytes_remaining = 0
- self.reading_header = True
- self.ibuffer = b''
- self.forwarding_func = forwarding_func
- if forwarding_func is None:
- # This whole class is useless if we do nothing with the
- # unpickled results.
- raise Exception("forwarding function must be set")
-
- # Initiate all connections by sending an ack. This allows
- # the initiators of the socket to await this to ensure
- # that this end is up and running (and therefore already
- # into the async map).
- ack_bytes = bytearray()
- ack_bytes.append(42)  # 42 == ord('*'); append an int so this works on both Python 2 and 3
- file_object.send(ack_bytes)
-
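For illustration, a minimal sketch of the initiating side of this ack handshake; the helper name and connection details are hypothetical, not part of the LLDB test framework:

# Hypothetical initiator-side sketch: connect, then block on the single ack
# byte (ASCII 42, i.e. b'*') that the reader channel sends once it is
# registered in the async map, after which framed events may be streamed.
import socket

def connect_to_results_channel(host, port):
    sock = socket.create_connection((host, port))
    ack = sock.recv(1)
    assert ack == b'*'
    return sock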
- def deserialize_payload(self):
- """Unpickles the collected input buffer bytes and forwards."""
- if len(self.ibuffer) > 0:
- self.forwarding_func(cPickle.loads(self.ibuffer))
- self.ibuffer = b''
-
- def consume_header_bytes(self, data):
- """Consumes header bytes from the front of data.
- @param data the incoming data stream bytes
- @return any data leftover after consuming header bytes.
- """
- # We're done if there is no content.
- if not data or (len(data) == 0):
- return None
-
- full_header_len = 4
-
- assert len(self.header_contents) < full_header_len
-
- bytes_avail = len(data)
- bytes_needed = full_header_len - len(self.header_contents)
- header_bytes_avail = min(bytes_needed, bytes_avail)
- self.header_contents += data[:header_bytes_avail]
- if len(self.header_contents) == full_header_len:
- import struct
- # End of header.
- self.packet_bytes_remaining = struct.unpack(
- "!I", self.header_contents)[0]
- self.header_contents = b""
- self.reading_header = False
- return data[header_bytes_avail:]
-
- # If we made it here, we've exhausted the data and
- # we're still parsing header content.
- return None
-
- def consume_payload_bytes(self, data):
- """Consumes payload bytes from the front of data.
- @param data the incoming data stream bytes
- @return any data leftover after consuming remaining payload bytes.
- """
- if not data or (len(data) == 0):
- # We're done and there's nothing to do.
- return None
-
- data_len = len(data)
- if data_len <= self.packet_bytes_remaining:
- # We're consuming all the data provided.
- self.ibuffer += data
- self.packet_bytes_remaining -= data_len
-
- # If we're no longer waiting for payload bytes,
- # we flip back to parsing header bytes and we
- # unpickle the payload contents.
- if self.packet_bytes_remaining < 1:
- self.reading_header = True
- self.deserialize_payload()
-
- # We're done, no more data left.
- return None
- else:
- # We're only consuming a portion of the data since
- # the data contains more than the payload amount.
- self.ibuffer += data[:self.packet_bytes_remaining]
- data = data[self.packet_bytes_remaining:]
-
- # We now move on to reading the header.
- self.reading_header = True
- self.packet_bytes_remaining = 0
-
- # And we can deserialize the payload.
- self.deserialize_payload()
-
- # Return the remaining data.
- return data
-
- def handle_read(self):
- # Read some data from the socket.
- try:
- data = self.recv(8192)
- # print('driver socket READ: %d bytes' % len(data))
- except socket.error as socket_error:
- print(
- "\nINFO: received socket error when reading data "
- "from test inferior:\n{}".format(socket_error))
- raise
- except Exception as general_exception:
- print(
- "\nERROR: received non-socket error when reading data "
- "from the test inferior:\n{}".format(general_exception))
- raise
-
- # Consume the message content.
- while data and (len(data) > 0):
- # If we're reading the header, gather header bytes.
- if self.reading_header:
- data = self.consume_header_bytes(data)
- else:
- data = self.consume_payload_bytes(data)
-
- def handle_close(self):
- # print("socket reader: closing port")
- self.close()
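For illustration, a minimal sketch of the sender-side framing this reader expects: a 4-byte, network-byte-order payload length followed by the pickled event bytes. The helper name and sample event are hypothetical:

# Hypothetical framing sketch matching consume_header_bytes() and
# consume_payload_bytes() above: struct "!I" length prefix + pickled payload.
import struct
from six.moves import cPickle

def frame_event(event):
    payload = cPickle.dumps(event)
    return struct.pack("!I", len(payload)) + payload

# e.g. sock.sendall(frame_event({"event": "test_result", "status": "success"}))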
-
-
-class UnpicklingForwardingListenerChannel(asyncore.dispatcher):
- """Provides a socket listener asyncore channel for unpickling/forwarding.
-
- This channel will listen on a socket port (use 0 for host-selected). Any
- client that connects will have an UnpicklingForwardingReaderChannel handle
- communication over the connection.
-
- The dotest parallel test runners, when collecting test results, open the
- test results side channel over a socket. This channel handles connections
- from inferiors back to the test runner. Each worker fires up a listener
- for each inferior invocation. This simplifies the asyncore.loop() usage,
- one of the reasons for implementing with asyncore. This listener shuts
- down once a single connection is made to it.
- """
- def __init__(self, async_map, host, port, backlog_count, forwarding_func):
- asyncore.dispatcher.__init__(self, map=async_map)
- self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
- self.set_reuse_addr()
- self.bind((host, port))
- self.address = self.socket.getsockname()
- self.listen(backlog_count)
- self.handler = None
- self.async_map = async_map
- self.forwarding_func = forwarding_func
- if forwarding_func is None:
- # This whole class is useless if we do nothing with the
- # unpickled results.
- raise Exception("forwarding function must be set")
-
- def handle_accept(self):
- (sock, addr) = self.socket.accept()
- if sock and addr:
- # print('Incoming connection from %s' % repr(addr))
- self.handler = UnpicklingForwardingReaderChannel(
- sock, self.async_map, self.forwarding_func)
-
- def handle_close(self):
- self.close()
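For illustration, a minimal sketch of how a test runner could wire this listener into an asyncore loop; the forwarding function and port choice (0 = OS-selected) are hypothetical placeholders:

# Hypothetical usage sketch: listen on an OS-selected port, forward each
# unpickled test event to a simple printer, and pump the asyncore loop.
import asyncore

def print_event(event):
    print("received test event: {}".format(event))

async_map = {}
listener = UnpicklingForwardingListenerChannel(
    async_map, "localhost", 0, 5, print_event)
# listener.address[1] holds the port to hand to the test inferior.
asyncore.loop(timeout=1.0, map=async_map)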