@@ -15,8 +15,8 @@
15 | 15 | ###
16 | 16 | # -*- coding: utf-8 -*-
17 | 17 | """Monolith database implementation. Crawls Redfish and Legacy REST implementations
18 |    | - and holds all data retrieved. The created database is called the **monolith** and referenced as
19 |    | - such in the code documentation."""
   | 18 | +and holds all data retrieved. The created database is called the **monolith** and referenced as
   | 19 | +such in the code documentation."""
20 | 20 |
21 | 21 | # ---------Imports---------
22 | 22 |
@@ -455,6 +455,7 @@ def load(
455 | 455 |         loadcomplete=False,
456 | 456 |         path_refresh=False,
457 | 457 |         json_out=False,
    | 458 | +        single=True,
458 | 459 |     ):
459 | 460 |         """Walks the entire data model and caches all responses or loads an individual path into
460 | 461 |         the monolith. Supports both threaded and sequential crawling.
@@ -489,7 +490,7 @@ def load(
489 | 490 |         selectivepath = path
490 | 491 |         if not selectivepath:
491 | 492 |             selectivepath = self.client.default_prefix
492 |     | -        if loadtype == "href" and not self.client.base_url.startswith("blobstore://."):
    | 493 | +        if loadtype == "href" and not self.client.base_url.startswith("blobstore://.") and not single:
493 | 494 |             if not self.threads:
494 | 495 |                 for _ in range(6):
495 | 496 |                     workhand = LoadWorker(self.get_queue)
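With the new `single` flag (default `True`), the worker fan-out above is only reached for a plain `href` crawl over a non-blobstore transport when the caller explicitly passes `single=False`. A minimal sketch of that dispatch, with `spawn_workers` and `crawl_sequentially` as hypothetical stand-ins for the `LoadWorker` setup shown in the hunk:

```python
def spawn_workers(count):
    # Stand-in for starting `count` LoadWorker threads against the get queue.
    print(f"threaded crawl with {count} workers")

def crawl_sequentially():
    # Stand-in for walking the tree in the calling thread.
    print("sequential crawl")

def dispatch_crawl(loadtype, base_url, single=True):
    """Mirror of the guard above: threads only for remote href crawls with single=False."""
    if loadtype == "href" and not base_url.startswith("blobstore://.") and not single:
        spawn_workers(count=6)
    else:
        crawl_sequentially()

dispatch_crawl("href", "https://10.0.0.100")                # sequential (new default)
dispatch_crawl("href", "https://10.0.0.100", single=False)  # threaded, as before
dispatch_crawl("href", "blobstore://.", single=False)       # local channel: sequential
```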
@@ -545,6 +546,65 @@ def load(
545 | 546 |         if self.directory_load and init:
546 | 547 |             self._populatecollections()
547 | 548 |
    | 549 | +    def loadallconfig(
    | 550 | +        self,
    | 551 | +        path=None,
    | 552 | +        includelogs=False,
    | 553 | +        init=False,
    | 554 | +        crawl=True,
    | 555 | +        loadtype="href",
    | 556 | +        loadcomplete=False,
    | 557 | +        path_refresh=False,
    | 558 | +        json_out=False,
    | 559 | +        single=True,
    | 560 | +    ):
    | 561 | +        """Walks the entire data model and caches all responses or loads an individual path into
    | 562 | +        the monolith. Supports both threaded and sequential crawling.
    | 563 | +
    | 564 | +        :param path: The path to start the crawl from if crawling, or the individual path to
    | 565 | +            load into the monolith. If no path is given, the crawl starts from the default:
    | 566 | +            */redfish/v1/* or */rest/v1*, depending on whether the system is Redfish or
    | 567 | +            LegacyRest.
    | 568 | +        :type path: str
    | 569 | +        :param includelogs: Flag to determine if logs should be downloaded as well in the crawl.
    | 570 | +        :type includelogs: bool
    | 571 | +        :param init: Flag to determine if this is the initial load.
    | 572 | +        :type init: bool
    | 573 | +        :param crawl: Flag to determine if load should crawl through found links.
    | 574 | +        :type crawl: bool
    | 575 | +        :param loadtype: Flag to determine if loading standard links (*href*) or schema links (*ref*).
    | 576 | +        :type loadtype: str
    | 577 | +        :param loadcomplete: Flag to download the entire data model including registries and
    | 578 | +            schemas.
    | 579 | +        :type loadcomplete: bool
    | 580 | +        :param path_refresh: Flag to reload the specified path, clearing any patches and overwriting
    | 581 | +            the current data in the monolith.
    | 582 | +        :type path_refresh: bool
    | 583 | +        """
    | 584 | +
    | 585 | +        selectivepath = path
    | 586 | +        if not selectivepath:
    | 587 | +            selectivepath = self.client.default_prefix
    | 588 | +        self._load(
    | 589 | +            selectivepath,
    | 590 | +            originaluri=None,
    | 591 | +            crawl=crawl,
    | 592 | +            includelogs=includelogs,
    | 593 | +            init=init,
    | 594 | +            loadtype=loadtype,
    | 595 | +            loadcomplete=loadcomplete,
    | 596 | +            path_refresh=path_refresh,
    | 597 | +            prevpath=None,
    | 598 | +        )
    | 599 | +
    | 600 | +        if init:
    | 601 | +            if LOGGER.getEffectiveLevel() >= 20 and not json_out:
    | 602 | +                sys.stdout.write("Done\n")
    | 603 | +            else:
    | 604 | +                LOGGER.info("Done\n")
    | 605 | +            if self.directory_load and init:
    | 606 | +                self._populatecollections()
    | 607 | +
548 | 608 |     def _load(
549 | 609 |         self,
550 | 610 |         path,
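Unlike `load`, the new `loadallconfig` never touches the thread pool: it resolves the start path and hands it directly to `_load`, so the crawl always runs in the calling thread. A hedged usage sketch; obtaining a bound `monolith` instance is assumed here and depends on the library version:

```python
# Hedged sketch: assume `monolith` is an instance of this class whose `client`
# is already logged in; how it is obtained depends on the library version.
monolith.loadallconfig(init=True, loadcomplete=True, path_refresh=True)

# The crawl ran in the calling thread; cached resources are now addressable by URI.
for uri in monolith.paths:
    print(uri)
```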
@@ -580,7 +640,9 @@ def _load(
580 | 640 |         if (path.endswith("?page=1") or path.endswith(".json")) and not loadcomplete:
581 | 641 |             # Don't download schemas in crawl unless we are loading absolutely everything
582 | 642 |             return
583 |     | -        elif not includelogs and crawl:
    | 643 | +        elif path == "/" or "telemetry" in path.lower() or not path.startswith("/redfish/v1"):
    | 644 | +            return
    | 645 | +        elif not includelogs:
584 | 646 |             # Only include logs when asked as there can be an extreme amount of entries
585 | 647 |             if "/log" in path.lower():
586 | 648 |                 return
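Taken together, the crawl filter now rejects several classes of path before a GET is ever issued: schema pages outside a complete load, the bare service root, anything mentioning telemetry, anything outside the `/redfish/v1` namespace, and, unless logs were requested, anything log-related. A standalone sketch of the combined predicate, using the hypothetical helper name `should_skip`:

```python
def should_skip(path, includelogs=False, loadcomplete=False):
    """Mirror of the crawl filter above: True means the path is not fetched."""
    lowered = path.lower()
    if (path.endswith("?page=1") or path.endswith(".json")) and not loadcomplete:
        return True  # schema pages: only wanted on a complete load
    if path == "/" or "telemetry" in lowered or not path.startswith("/redfish/v1"):
        return True  # service root, telemetry, or non-Redfish paths
    if not includelogs and "/log" in lowered:
        return True  # log entries can be enormous; skip unless asked
    return False

assert should_skip("/redfish/v1/TelemetryService/")            # telemetry skipped
assert should_skip("/redfish/v1/Systems/1/LogServices/IML/")   # logs skipped by default
assert not should_skip("/redfish/v1/Systems/1/")               # normal resource crawled
```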
@@ -1090,23 +1152,55 @@ def _populatecollections(self):
1090 | 1152 |             colltype = ".".join(coll.split(".", 2)[:2]).split("#")[-1]
1091 | 1153 |             self.colltypes[typename].add(colltype)
1092 | 1154 |
1093 |      | -    def capture(self, redmono=False):
1094 |      | -        """Crawls the server specified by the client and returns the entire monolith.
     | 1155 | +    def capture(self, redmono=False, single=True):
     | 1156 | +        """Crawls the server and returns either the full monolith data or just headers and responses.
     | 1157 | +
     | 1158 | +        :param redmono: If True, returns only headers and responses; otherwise, returns the full monolith data.
     | 1159 | +        :type redmono: bool
     | 1160 | +        :rtype: dict
     | 1161 | +        """
     | 1162 | +        self.load(includelogs=False, crawl=True, loadcomplete=True, path_refresh=True, init=True, single=single)
     | 1163 | +
     | 1164 | +        if not redmono:
     | 1165 | +            return self.to_dict()
1095 | 1166 |
1096 |      | -        :param redmono: Flag to return only the headers and responses instead of the entire monolith
1097 |      | -            member data.
     | 1167 | +        ret = {}
     | 1168 | +        try:
     | 1169 | +            for x, v in self.paths.items():
     | 1170 | +                if v:
     | 1171 | +                    v_resp = v.resp
     | 1172 | +                    if v_resp:
     | 1173 | +                        ret[x] = {"Headers": v_resp.getheaders(), "Response": v_resp.dict}
     | 1174 | +        except Exception as e:
     | 1175 | +            LOGGER.debug("Error in capture: %s", e)
     | 1176 | +            ret = {}
     | 1177 | +        return ret
     | 1178 | +
     | 1179 | +    def captureallconfig(self, single=True, exclueurl=None):
     | 1180 | +        """Crawls the server configuration and returns headers and responses for each path.
     | 1181 | +
     | 1182 | +        :param exclueurl: URL whose matching paths are excluded from the returned data, if any.
1098 |      | -        :type redmono: bool
     | 1183 | +        :type exclueurl: str
1099 | 1184 |         :rtype: dict
1100 | 1185 |         """
1101 |      | -        self.load(includelogs=True, crawl=True, loadcomplete=True, path_refresh=True, init=True)
1102 |      | -        return (
1103 |      | -            self.to_dict()
1104 |      | -            if not redmono
1105 |      | -            else {
1106 |      | -                x: {"Headers": v.resp.getheaders(), "Response": v.resp.dict} for x, v in list(self.paths.items()) if v
1107 |      | -            }
     | 1186 | +        self.loadallconfig(
     | 1187 | +            includelogs=False, crawl=True, loadcomplete=True, path_refresh=True, init=True, single=single
1108 | 1188 |         )
1109 | 1189 |
     | 1190 | +        ret = {}
     | 1191 | +
     | 1192 | +        try:
     | 1193 | +            for x, v in self.paths.items():
     | 1194 | +                if exclueurl is None or exclueurl.lower() not in x.lower():
     | 1195 | +                    if v:
     | 1196 | +                        v_resp = v.resp
     | 1197 | +                        if v_resp:
     | 1198 | +                            ret[x] = {"Headers": v_resp.getheaders(), "Response": v_resp.dict}
     | 1199 | +        except Exception as e:
     | 1200 | +            LOGGER.debug("Error in captureallconfig: %s", e)
     | 1201 | +            ret = {}
     | 1202 | +        return ret
     | 1203 | +
1110 | 1204 |     def killthreads(self):
1111 | 1205 |         """Function to kill threads on logout"""
1112 | 1206 |         threads = []
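The reworked `capture` keeps its public shape but becomes defensive: the crawl now skips logs (`includelogs=False`, previously `True`), and the old dict comprehension is replaced by a guarded loop, so paths without a cached response are skipped instead of raising; any other failure is logged at debug level and yields an empty dict. A hedged usage sketch, reusing the assumed `monolith` binding from above:

```python
# Hedged sketch: `monolith` is assumed to be a bound, logged-in instance as above.
snapshot = monolith.capture(redmono=True)
for uri, entry in snapshot.items():
    # Each entry pairs the response headers with the parsed JSON body.
    print(uri, entry["Response"].get("@odata.type", "n/a"))
```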
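`captureallconfig` composes the two additions: it drives the sequential `loadallconfig` crawl and then filters the captured paths against `exclueurl`. A sketch under the same assumptions:

```python
# Hedged sketch: capture everything except URIs containing "/schemas".
config = monolith.captureallconfig(single=True, exclueurl="/schemas")
for uri in config:
    print(uri)  # no URI containing "/schemas" appears in the result
```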