diff --git a/aggreg.py b/aggreg.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..af351c1b492de61919885e5dd08807c0a2cfb349 100644
--- a/aggreg.py
+++ b/aggreg.py
@@ -0,0 +1,34 @@
+import feedparser
+from urllib.parse import urlparse
+from datetime import datetime
+from typing import Any
+
+
+def charge_urls(liste_url: list[str]) -> list[dict[str, Any] | None]:
+    # feedparser sets the 'bozo' flag when a feed could not be parsed cleanly;
+    # such feeds are replaced by None so that callers can skip them
+    return [
+        feed if not (feed := feedparser.parse(url))['bozo'] else None
+        for url in liste_url
+    ]
+
+
+def fusion_flux(liste_url: list[str], liste_flux: list[dict[str, Any] | None],
+                tri_chrono: bool) -> list[dict[str, str]]:
+    # flatten every entry of every successfully parsed feed into a uniform
+    # dictionary; None entries produced by charge_urls are skipped
+    feeds = [{
+        'titre': entry['title'],
+        'categorie': entry['category'],
+        'serveur': urlparse(entry['title_detail']['base']).netloc,
+        'date_publi': entry['published'],
+        'lien': entry['link'],
+        'description': entry['description']
+    } for feed in liste_flux if feed is not None for entry in feed['entries']]
+    if tri_chrono:
+        # newest entries first, parsed with the publication date format used
+        # by the source feeds
+        feeds.sort(key=lambda e: datetime.strptime(e['date_publi'],
+                                                   "%a, %d %b %Y %H:%M"),
+                   reverse=True)
+    else:
+        # otherwise rank by severity, from CRITICAL down to MINOR
+        feeds.sort(
+            key=lambda e: ["CRITICAL", "MAJOR", "MINOR"].index(e['categorie']))
+    return feeds
+
+
+def main():
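+    # a minimal sketch of a driver, assuming the feed URLs are hard-coded;
+    # the URL below is a placeholder, not one taken from the real project
+    urls = ["https://example.com/feed.xml"]
+    flux = charge_urls(urls)
+    for entry in fusion_flux(urls, flux, tri_chrono=True):
+        print(f"{entry['date_publi']} [{entry['categorie']}] {entry['titre']}")
+
+
+if __name__ == "__main__":
+    main()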