diff options
| author | Ed Tanous <ed.tanous@intel.com> | 2017-03-21 13:15:58 -0700 |
|---|---|---|
| committer | Ed Tanous <ed.tanous@intel.com> | 2017-03-21 13:15:58 -0700 |
| commit | 1ccd57c4a6cd397794bb81bbb043a364d02aab4f (patch) | |
| tree | 8dd4969e2394037121d751dc92b01a1749943e72 /scripts | |
| parent | c4771fb4cecd77272a72f3265d19096c83e5e8e9 (diff) | |
| download | bmcweb-1ccd57c4a6cd397794bb81bbb043a364d02aab4f.tar.gz bmcweb-1ccd57c4a6cd397794bb81bbb043a364d02aab4f.zip | |
incremental
Diffstat (limited to 'scripts')
| -rwxr-xr-x | scripts/build_web_assets.py | 136 |
1 file changed, 87 insertions(+), 49 deletions(-)
diff --git a/scripts/build_web_assets.py b/scripts/build_web_assets.py index 9cad070..58a9b94 100755 --- a/scripts/build_web_assets.py +++ b/scripts/build_web_assets.py @@ -5,17 +5,19 @@ import os import gzip import hashlib from subprocess import Popen, PIPE +import re THIS_DIR = os.path.dirname(os.path.realpath(__file__)) -ENABLE_CACHING = False +ENABLE_CACHING = True -# TODO this needs to be better +# TODO(ed) this needs to be better CONTENT_TYPES = { '.css': "text/css;charset=UTF-8", '.html': "text/html;charset=UTF-8", '.js': "text/html;charset=UTF-8", - '.png': "image/png;charset=UTF-8" + '.png': "image/png;charset=UTF-8", + '.woff': "application/x-font-woff", } CPP_BEGIN_BUFFER = """ @@ -23,25 +25,46 @@ CPP_BEGIN_BUFFER = """ """ -ROUTE_DECLARATION = """void crow::webassets::request_routes(crow::App<crow::TokenAuthorizationMiddleware>& app){ +ROUTE_DECLARATION = """ + +void crow::webassets::request_routes(BmcAppType& app){ +""" + +CPP_MIDDLE_CACHING_HANDLER = """ + res.add_header("Cache-Control", "public, max-age=31556926"); + res.add_header("ETag", "{sha1}"); + if (req.headers.count("If-None-Match") == 1) {{ + if (req.get_header_value("If-None-Match") == "{sha1}"){{ + res.code = 304; + res.end(); + return; + }} + }} """ + CPP_MIDDLE_BUFFER = """ CROW_ROUTE(app, "{relative_path_sha1}")([](const crow::request& req, crow::response& res) {{ + {CPP_MIDDLE_CACHING_HANDLER} res.code = 200; // TODO, if you have a browser from the dark ages that doesn't support gzip, // unzip it before sending based on Accept-Encoding header - res.add_header("Content-Encoding", "gzip"); - res.add_header("Cache-Control", "{cache_control_value}"); + res.add_header("Content-Encoding", "{content_encoding}"); res.add_header("Content-Type", "{content_type}"); res.write({relative_path_escaped}); - res.end(); + res.end(); }}); """ +def twos_comp(val, bits): + """compute the 2's compliment of int value val""" + if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255 + val = 
val - (1 << bits) # compute negative value + return val # return positive value as is + CPP_END_BUFFER = """ } """ @@ -52,62 +75,57 @@ CPP_END_BUFFER2 = """const static std::string {relative_path_escaped}{{{file_byt def get_relative_path(full_filepath): pathsplit = full_filepath.split(os.path.sep) relative_path = os.path.sep.join(pathsplit[pathsplit.index("static") + 1:]) - - relative_path_escaped = relative_path.replace("/", "_").replace(".", "_").replace("-", "_") + relative_path_escaped = relative_path + for character in ['/', '.', '-']: + relative_path_escaped = relative_path_escaped.replace(character, "_") relative_path = "/static/" + relative_path - # handle the default routes - if relative_path == "/static/index.html": - relative_path = "/" - return relative_path, relative_path_escaped def get_sha1_path_from_relative(relative_path, sha1): if sha1 != "": path, extension = os.path.splitext(relative_path) - return path + "_" + sha1 + extension + return path + "-" + sha1[:10] + extension else: return relative_path - def filter_html(sha1_list, file_content): string_content = file_content.decode() for key, value in sha1_list.items(): - if key != "/": - # todo, this is very naive, do it better (parse the html) - start = "src=\"" + key.lstrip("/") - replace = "src=\"" + get_sha1_path_from_relative(key, value) - #print("REplacing {} with {}".format(start, replace)) - string_content = string_content.replace(start, replace) - - start = "href=\"" + key.lstrip("/") - replace = "href=\"" + get_sha1_path_from_relative(key, value) - #print("REplacing {} with {}".format(start, replace)) - string_content = string_content.replace(start, replace) + key = key.lstrip("/") + replace_name = get_sha1_path_from_relative(key, value) + key = re.escape(key) + string_content = re.sub("((src|href)=[\"'])(" + key + ")([\"'])", "\\1" + replace_name + "\\4", string_content) return string_content.encode() + def main(): """ Main Function """ parser = argparse.ArgumentParser() 
parser.add_argument('-i', '--input', nargs='+', type=str) parser.add_argument('-o', '--output', type=str) + parser.add_argument('-d', '--debug', action='store_true') args = parser.parse_args() file_list = args.input + file_list = [os.path.realpath(f) for f in file_list] + sha1_list = {} - if ENABLE_CACHING: - # Sha256 hash everthing + if not args.debug: + # TODO(ed) most html and woff cacheable + excluded_types = [".html", ".woff"] + # sha1 hash everthing for full_filepath in file_list: - if not full_filepath.endswith(".html"): + if os.path.splitext(full_filepath)[1] not in excluded_types: with open(full_filepath, 'rb') as input_file: file_content = input_file.read() - sha = hashlib.sha256() + sha = hashlib.sha1() sha.update(file_content) - sha_text = "".join("{:02x}".format(x) for x in sha.digest())[:10] + sha_text = "".join("{:02x}".format(x) for x in sha.digest()) relative_path, relative_path_escaped = get_relative_path(full_filepath) sha1_list[relative_path] = sha_text @@ -126,13 +144,20 @@ def main(): print("Fixing {}".format(relative_path)) file_content = filter_html(sha1_list, file_content) - - file_content = gzip.compress(file_content) - #file_content = file_content[:10] - - array_binary_text = ', '.join('0x{:02x}'.format(x) for x in file_content) - - cpp_output.write(CPP_END_BUFFER2.format(relative_path=relative_path, file_bytes=array_binary_text, relative_path_escaped=relative_path_escaped)) + if not args.debug: + file_content = gzip.compress(file_content) + #file_content = file_content[:10] + # compute the 2s complement. 
If you don't, you get narrowing warnings from gcc/clang + + array_binary_text = ', '.join(str(twos_comp(x, 8)) for x in file_content) + + cpp_output.write( + CPP_END_BUFFER2.format( + relative_path=relative_path, + file_bytes=array_binary_text, + relative_path_escaped=relative_path_escaped + ) + ) cpp_output.write(ROUTE_DECLARATION) @@ -141,24 +166,37 @@ def main(): relative_path, relative_path_escaped = get_relative_path(full_filepath) sha1 = sha1_list.get(relative_path, '') - relative_path_sha1 = get_sha1_path_from_relative(relative_path, sha1) - content_type = CONTENT_TYPES.get(os.path.splitext(relative_path)[1], "") if content_type == "": print("unknown content type for {}".format(relative_path)) - if sha1 == "": - cache_control_value = "no-cache" + # handle the default routes + if relative_path == "/static/index.html": + relative_path = "/" + + relative_path_sha1 = get_sha1_path_from_relative(relative_path, sha1) + + content_encoding = 'none' if args.debug else 'gzip' + + environment = { + 'relative_path':relative_path, + 'relative_path_escaped': relative_path_escaped, + 'relative_path_sha1': relative_path_sha1, + 'sha1': sha1, + 'sha1_short': sha1[:20], + 'content_type': content_type, + 'ENABLE_CACHING': str(ENABLE_CACHING).lower(), + 'content_encoding': '' + } + if ENABLE_CACHING and sha1 != "": + environment["CPP_MIDDLE_CACHING_HANDLER"] = CPP_MIDDLE_CACHING_HANDLER.format( + **environment + ) else: - cache_control_value = "max-age=31556926" + environment["CPP_MIDDLE_CACHING_HANDLER"] = "" content = CPP_MIDDLE_BUFFER.format( - relative_path=relative_path, - relative_path_escaped=relative_path_escaped, - relative_path_sha1=relative_path_sha1, - sha1=sha1, - content_type=content_type, - cache_control_value=cache_control_value + **environment ) cpp_output.write(content) |

