mirror of
https://forge.chapril.org/tykayn/orgmode-to-gemini-blog
synced 2025-06-20 09:04:42 +02:00
script de stats
This commit is contained in:
parent
6d77de4696
commit
759f30f628
7 changed files with 196 additions and 109 deletions
127
atom_generate.py
127
atom_generate.py
|
@ -1,111 +1,56 @@
|
|||
"""Generate an Atom feed for a blog.

Usage: python atom_generate.py <blog>

Reads article metadata from ``sources/<blog>/build/articles_info.json``
(produced by the build step) and writes the feed to
``html-websites/<blog>/feed/index.xml``.
"""
import os
import re
import json
from datetime import datetime
from xml.sax.saxutils import escape

import argparse

from utils import find_first_level1_title, find_year_and_slug_on_filename, find_extract_in_content_org
from utils import get_blog_template_conf
from website_config import configs_sites

# Command-line arguments: the blog folder name selects config and paths.
parser = argparse.ArgumentParser(description="Générer un flux Atom des articles.")
parser.add_argument("blog", help="Le nom du dossier de blog.")

args = parser.parse_args()

# Site configuration; NDD is the blog's domain name used in links and IDs.
template_content = get_blog_template_conf(args.blog)
website_ndd = template_content['NDD']


def _atom_date(iso_date):
    """Format an ISO-8601 date string as an Atom timestamp.

    NOTE(review): assumes dates in articles_info.json are UTC — confirm
    against the build step that writes them.
    """
    return datetime.fromisoformat(iso_date).strftime("%Y-%m-%dT%H:%M:%S+00:00")


# Load the article metadata produced by the build step.
json_file = f'sources/{args.blog}/build/articles_info.json'
with open(json_file, 'r', encoding='utf-8') as f:
    articles_info = json.load(f)

# Sort articles by decreasing date (newest first).
sorted_articles = sorted(articles_info.values(), key=lambda x: x['date'], reverse=True)

# Feed-level <updated>: date of the newest article, or "now" for an empty blog
# (avoids an IndexError when articles_info.json is empty).
if sorted_articles:
    feed_updated = _atom_date(sorted_articles[0]['date'])
else:
    feed_updated = datetime.now().strftime("%Y-%m-%dT%H:%M:%S+00:00")

author_name = configs_sites[args.blog]["AUTHOR"]
author_email = configs_sites[args.blog]["EMAIL"]

# Write the Atom feed. The feed must contain:
# - a unique ID for the feed and each entry
# - an <author> tag with name and email
# - dates in ISO 8601 format with timezone
# - a rel="self" link to the XML file
# Text nodes (title, summary, author) are XML-escaped; HTML content goes
# through CDATA. NOTE(review): a literal "]]>" inside html_content would
# still break the CDATA section — confirm the build step never emits one.
with open(f"html-websites/{args.blog}/feed/index.xml", "w", encoding="utf-8") as f:
    f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    f.write('<feed xmlns="http://www.w3.org/2005/Atom">\n')
    f.write(f'  <title>Flux Atom des articles de {escape(args.blog)}</title>\n')
    f.write(f'  <link href="{website_ndd}/feed"/>\n')
    f.write(f'  <updated>{feed_updated}</updated>\n')
    f.write('  <id>tag:' + website_ndd + ',2023:/feed</id>\n')
    f.write('  <author>\n')
    f.write(f'    <name>{escape(author_name)}</name>\n')
    f.write(f'    <email>{escape(author_email)}</email>\n')
    f.write('  </author>\n')
    f.write(f'  <link rel="self" href="{website_ndd}/feed"/>\n')

    # One <entry> per article, newest first.
    for article in sorted_articles:
        entry_date = _atom_date(article["date"])
        f.write('  <entry>\n')
        f.write(f'    <id>tag:{website_ndd},2023:{article["slug"]}</id>\n')
        f.write(f'    <title>{escape(article["title"] or "")}</title>\n')
        f.write(f'    <link href="{website_ndd}/{article["slug"]}"/>\n')
        f.write('    <content type="html"><![CDATA[\n')
        f.write(f'      {article["html_content"]}\n')
        f.write('    ]]></content>\n')
        f.write(f'    <summary>{escape(article.get("extract") or "")}</summary>\n')
        f.write(f'    <published>{entry_date}</published>\n')
        f.write(f'    <updated>{entry_date}</updated>\n')
        f.write('    <author>\n')
        f.write(f'      <name>{escape(author_name)}</name>\n')
        f.write(f'      <email>{escape(author_email)}</email>\n')
        f.write('    </author>\n')
        f.write('  </entry>\n')

    f.write('</feed>')
Loading…
Add table
Add a link
Reference in a new issue