#!/bin/python3
# Find the previous and next article for each blog post.
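"""
Build sources/<blog>/build/articles_info.json: for each .org file in
sources/<blog>/lang_fr, gather its path, slug, date, year, tags and title,
then link every entry to its previous and next article.

Example invocation (a sketch; replace <this_script>.py with the real file name):
    python3 <this_script>.py tykayn_blog
"""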
from utils import *
from website_config import configs_sites
import os
import json
import re
import argparse
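
# Note: find_year_and_slug_on_filename, extract_tags_from_file,
# find_first_level1_title and global_config are not defined in this file;
# they are presumably provided by the star import from utils above.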

# Configure argparse to take the blog folder as an argument
parser = argparse.ArgumentParser(description='Generate a list of the latest blog articles.')
# nargs='?' makes the positional argument optional so the declared default can actually apply
parser.add_argument('blog', type=str, nargs='?', default='tykayn_blog', help='Name of the blog folder to process')
args = parser.parse_args()

# Function to extract the basename (file name without extension) of a file
def get_basename(file_name):
    return os.path.splitext(file_name)[0]
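
# A quick illustration (the file name below is made up, not from the real blog):
#   get_basename('2024-01-15__mon-article.org')  ->  '2024-01-15__mon-article'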

# Path of the folder containing the orgmode files
directory = f'sources/{args.blog}/lang_fr'
destination_json = f'sources/{args.blog}/build'
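# Note: the build folder is assumed to already exist; the open() call at the
# end of this script will fail if it does not.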

# Dictionary to store each file's information
files_dict = {}

# Walk through the files in the folder
for file_name in os.listdir(directory):
    if file_name.endswith('.org'):
        file_path = os.path.join(directory, file_name)
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
            basename = get_basename(file_name)
            date_str, annee, slug = find_year_and_slug_on_filename(basename)
            tags = extract_tags_from_file(file_path, global_config['excluded_tags'])
            # Convert the tags to a list if they come back as a set
            if isinstance(tags, set):
                tags = list(tags)
            boom = basename.split('__')
            title = find_first_level1_title(content)
            files_dict[f"{annee}/{slug}"] = {
                'path': file_path,
                'basename': basename,
                'slug': slug,
                'slug_with_year': f"{annee}/{slug}",
                'date': boom[0],
                'annee': annee,
                'tags': tags,  # guaranteed to be a list, converted above if needed
                'title': title,
                'next': None,
                'previous': None
            }
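
# A sketch of one resulting entry (all values below are illustrative; the real
# date, year, slug, tags and title come from the file name and from the
# helpers imported from utils):
# files_dict['2024/mon-article'] == {
#     'path': 'sources/tykayn_blog/lang_fr/2024-01-15__mon-article.org',
#     'basename': '2024-01-15__mon-article',
#     'slug': 'mon-article',
#     'slug_with_year': '2024/mon-article',
#     'date': '2024-01-15',
#     'annee': '2024',
#     'tags': ['exemple'],
#     'title': 'Mon article',
#     'next': None,
#     'previous': None
# }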

# Sort the basenames (year/slug keys) in descending order
sorted_basenames = sorted(files_dict.keys(), reverse=True)
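# Note: the keys are "year/slug" strings, so reverse lexicographic order puts
# the most recent years first; within the same year the order depends on the
# slug, not on the article date.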

# Add the names of the next and previous articles
for i in range(len(sorted_basenames)):
    basename = sorted_basenames[i]
    if i > 0:
        files_dict[basename]['previous'] = sorted_basenames[i - 1]
    if i < len(sorted_basenames) - 1:
        files_dict[basename]['next'] = sorted_basenames[i + 1]
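
# Note: sorted_basenames is in descending order, so 'previous' points to the
# entry just before in that list (a more recent key) and 'next' to the entry
# just after (an older key).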

with open(destination_json + '/articles_info.json', 'w', encoding='utf-8') as json_file:
    files_dict_serialized = json.dumps(files_dict, ensure_ascii=False, indent=4)
    json_file.write(files_dict_serialized)
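
# A minimal sketch of how another script could read the generated file (the
# path and keys come from the code above; nothing else is assumed):
#
#     with open(destination_json + '/articles_info.json', encoding='utf-8') as f:
#         articles = json.load(f)
#     for key, info in articles.items():
#         print(key, info['title'], '| previous:', info['previous'], '| next:', info['next'])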

# Print the dictionary for verification
# for basename, info in files_dict.items():
#     print(f"Article: {basename}")
#     print(f" Path: {info['path']}")
#     print(f" tags: {info['tags']}")
#     print(f" title: {info['title']}")
#     print(f" Previous: {info['previous']}")
#     print(f" Next: {info['next']}")
#     print("-" * 40)