diff --git a/aggreg.py b/aggreg.py
index 1c4ea0a8e5c05516e83ec5691a1acf35d52a515d..1630a1d5d4ebf2f60dc4ce1293a6aff43d9f9a61 100644
--- a/aggreg.py
+++ b/aggreg.py
@@ -2,7 +2,8 @@ import feedparser
 from urllib.parse import urlparse
 from datetime import datetime
 import time
-
+from argparse import ArgumentParser
+import yaml
 
 def charge_urls(liste_url: list[str]) -> list[dict[str, any] | None]:
     # if the key 'bozo' is true, then the feed had an error during processing, so we set it to None
@@ -55,12 +56,15 @@ def genere_html(liste_evenements: list[dict[str, str]],
 
 
 def main():
-    urls = [f"http://192.168.78.{i}/rss.xml" for i in range(3, 15)]
-    charge = charge_urls(urls)
-    evenements = fusion_flux(urls, charge, None)
-    print(evenements)
-    genere_html(evenements, "aggreg.html")
-
+    parser = ArgumentParser(description="obtain events feeds from RSS and compile them into a nice webpage")
+    parser.add_argument("-c", "--config", help="specifies a config path instead of the default config path %(default)s", default="/etc/eventswrangler.conf")
+    configpath = parser.parse_args().config
+    with open(configpath) as file:
+        conf = yaml.safe_load(file)
+    urls = [source + '/' + conf['rss-name'] for source in conf['sources']]
+    feeds = charge_urls(urls)
+    fusioned = fusion_flux(urls, feeds, conf['tri-chrono'])
+    genere_html(fusioned, conf['destination'])
 if __name__ == "__main__":
     main()