Auto-éditer un wikilivre/Annexer (Version internationalisée)/src/add sclipwb.sh
Illustrations are downloaded from the wikibooks site.
add_sclipwb.sh
This is the master command that builds the personalized source, copyright and license page for the images of a wikibook (sclipwb); it includes en_sclipwb.inc or fr_sclipwb.inc.
- en_sclipwb.inc contains the gettext strings for translations.
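The strings wrapped in gettext below are looked up in a compiled message catalogue under TEXTDOMAINDIR. As a minimal sketch of how such a catalogue could be produced and installed (the file names and the fr locale here are illustrative assumptions, not files shipped with the project):

# Illustrative only: extract the translatable strings, translate them,
# then compile and install the catalogue where TEXTDOMAINDIR=/usr/share/locale expects it.
xgettext --language=Shell -o add_sclipwb.pot add_sclipwb.sh
msginit --locale=fr --input=add_sclipwb.pot --output-file=fr.po   # then edit fr.po
msgfmt fr.po -o add_sclipwb.mo
sudo cp add_sclipwb.mo /usr/share/locale/fr/LC_MESSAGES/add_sclipwb.mo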
#! /bin/bash
#HF Fichier : add_sclipwb.sh
#HF Nom du fichier de commandes : add_sclipwb
#HF Syntaxe : "add_sclipwb <nom du livre> [ --t ]"
#HF Exemple : "./add_sclipwb LivreTest --t" à la console.
#HF La commande test est tests_add_sclipwb.bash dans le répertoire des tests
#HF
#DF La commande ./add_sclipwb <wikilivre> commence par créer la page
#DF
#DE The command ./add_sclipwb <Wiki-book> [--t] first creates the page
#DE
TEXTDOMAIN=add_sclipwb
TEXTDOMAINDIR="/usr/share/locale"
VERSION=211008
#O . gettext.sh for translation
. gettext.sh
#O If the script is invoked as './add_sclipwb.sh', print its version
if [ "$0" = "./add_sclipwb.sh" ]; then echo "add_sclipwb.sh : Version "$VERSION; fi
#O Include the file header.inc
if test -e ~/Add_appendix/bin/header.inc; then source ~/Add_appendix/bin/header.inc
else echo $"$Bindir/header.inc not found. Execute sbin/cp-src2bin.sh"; exit -1
fi
#O Create variable PageSclipwb
Projectdir=$Workdir/books/$1
PageSclipwb="$Projectdir/$1.sclipwb"
echo $PageSclipwb
#O Include the command scli.inc
source $Bindir/scli.inc
#O Initialize the personalized wikibooks sclipwb page with the title 'Images sources', etc.
cat $Projectdir/$1.scli > $PageSclipwb
#O Include the command corresponding to the origin of the book, depending on execution in test mode
#O If tests mode
if [ "$2" = "--t" ]
then
#T pwd > pwd.txt; read Localdir < pwd.txt; rm pwd.txt; else Localdir=$Bindir; fi
{
pwd > pwd.txt; read Localdir < pwd.txt; rm pwd.txt
if [ "$Site" = "fr.wikibooks.org" ]; then source $Localdir/fr_sclipwb.inc.sh
else source $Localdir/en_sclipwb.inc.sh
fi
}
#O Else: not a local test (no --t option)
else
if [ "$Site" = "fr.wikibooks.org" ]; then source $Bindir/fr_sclipwb.inc
else source $Bindir/en_sclipwb.inc
fi
fi
# end file add_sclipwb.sh
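The command assumes the project tree built by the earlier steps of the book. A rough, illustrative sketch of the files it relies on, assuming $Workdir is set by vars/installdir.var to ~/Add_appendix or ~/Annexer and the book short name is LivreTest:

# $Workdir/books/LivreTest/LivreTest.pj              list of the book's articles, one name per line
# $Workdir/books/LivreTest/LivreTest.scli            header produced via scli.inc
# $Workdir/books/LivreTest/LivreTest.sclipwb         page built by this command
# $Workdir/books/LivreTest/<article>/<article>.str   text stream of each article, scanned for File:/Image: links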
en_sclipwb.inc
#! /bin/bash
#H File : en_sclipwb.inc included in add_sclipwb.sh
#P . gettext for translation
. gettext.sh
TEXTDOMAIN=en_sclipwb.inc
TEXTDOMAINDIR="/usr/share/locale"
VERSION=211008
cd $Projectdir
if [ "$2" = "--t" ]
then
echo "Projectdir = $Projectdir"
echo "en_sclipwb version = $VERSION"
fi
#O Variable $PageScliPwb definition
PageScliPwb=$Projectdir/$1.sclipwb
#O Loop over the directories corresponding to the book's articles
#O As long as we can read the lines of the file $Projectdir/$1.pj
while read pjline
do
#O If the line read is not $1 (name of the book)
#T echo "line read : " $line
if [ $pjline != $1 ]
#O Then:
then
#O Enter the article directory,
cd $Projectdir/$pjline
#O Create image documentation files
#O Open the article stream $Projectdir/$pjline/$pjline.str and select
#O the character strings containing File:, Image:, and write them to the files
#O $Projectdir/$pjline/$pjline.files, .picts, .illustrations, .images, .links
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e Fichier: -e file: -e image: | sed -f $RepCom/$Conversions > $Projectdir/$pjline/$pjline.files
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e fichier: -e .jpg -e .png -e .gif | sed -f $RepCom/$Conversions> $Projectdir/$pjline/$pjline.picts
cat $Projectdir/$pjline/$pjline.files | grep title |sed "s/<\/div>//g" | awk -F">" '{print $NF}' > $Projectdir/$pjline/$pjline.illustrations
#T cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https | sed "s/\"/!/g" | cut -d '!' -f3 | grep https | tr '>' ! | sed "s/<\/a//g" |sed "s/!//g" > $Projectdir/$pjline/$pjline.links
cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https://$Site | sed "s/\"/!/g" | cut -d '!' -f2 > $Projectdir/$pjline/$pjline.images
#Tbreak
#O Download the image files from the wikimedia server.
#P Note: the -N option would avoid re-downloading a file that is already up to date,
#P without adding a numeric suffix to its name.
#T #T wget -N -P $Projectdir/$pjline -i $Projectdir/$pjline/$pjline.images
wget -P $Projectdir/$pjline -r -linf -k -p -E -i $Projectdir/$pjline/$pjline.images
#O Copy the downloaded image description pages (HTML) to the directory of the current article.
cp $Projectdir/$pjline/$Site/wiki/*.html $Projectdir/$pjline/.
#O Initialize the html.doublons file with empty text.
echo -n "" > html.doublons
#O List the image files in the order of printing or display,
#O using the list $Projectdir/$pjline/$pjline.images
#O As long as we can read lines in $Projectdir/$pjline/$pjline.images
while read pictline
do
#O Split each line on '/', select the last field and append '.html'
#echo $pictline | awk -F"/" '{for (i=1;i<=NF;i+=2) print $i "\n"}' #| cut -d '%' -f1 | cut -d '.' -f1 > tmp
echo $pictline | awk -F"/" '{ print $NF".html"}' >> html.doublons
#O Remove duplicates by keeping every other line (odd-numbered lines).
echo -n "" > html.list
awk 'BEGIN { FILENAME }
{memfile [NR] = $0 }
END { for ( i = 1 ; i <= NR ; i=i+2 ) {
print memfile[i] >> "html.list"
}
# print "Fin"
} ' html.doublons
#O End of the while loop reading lines from $Projectdir/$pjline/$pjline.images
done < $Projectdir/$pjline/$pjline.images
#O Copy the article name into the file $1.sclipwb
echo "'''$(gettext ' Article : ')'''$pjline'''<br \>" >> $PageScliPwb
echo "'''$(gettext ' Article : ')'''$pjline'''"
#P## Appendix version 'wikibooks' ##############################
#O As long as there are (local) links in the html.list image file
while read htmlline
do
#O Display the line read,
echo ""
echo ""
echo "$(gettext ' ---- line read = $htmlline --- ')"
echo ""
#O Select the character strings from the image file $htmlline and copy
#O them to $htmlline.str after replacing the character ',' with a newline
mkd -pw '**' $htmlline $htmlline.tmp
cat $htmlline.tmp | tr ',' '\n' > $htmlline.str
#O images,
echo -n "'''$(gettext ' Illustration : ')'''" > $htmlline.title
cat $htmlline.str | grep wgTitle | cut -d '"' -f4 >> $htmlline.title
cat $htmlline.title >> $PageScliPwb
cat $htmlline.title
#O source,
echo -n "''$(gettext ', source : ')''https://"$Site"/w/index.php?title=" > $htmlline.source
echo $htmlline.str | sed "s/.html//g" | sed "s/.str//g" >> $htmlline.source
cat $htmlline.source >> $PageScliPwb
cat $htmlline.source
#O license,
echo -n "$(gettext ' , ''license : ')'' " > $htmlline.license
cat $htmlline.str | grep licensetpl_short | sed "s/<td>//g" | sed "s/<span class//g" | sed "s/<\/span>//g" | sed "s/style=\"display:none;\"//g" | tr '=' '\n' | grep licensetpl_short | awk -F">" '{print $NF}' >> $htmlline.license
cat $htmlline.license >> $PageScliPwb
cat $htmlline.license
#O author(s).
echo -n '' > tmp
echo -n "$(gettext ' , ''author(s) : '' ')" > $htmlline.author
cat $htmlline.str | grep -i -n -m1 -A 1 -e Author -e Auteur | tr '/' '\n' | grep -i -e user -e utilisateur -e auteur -e author | cut -d '"' -f1 | grep -i -e user -e utilisateur -e auteur -e author > tmp
read Tmp < tmp
#T
echo "Tmp = $Tmp"
if [ "$Tmp" = "" ]
#T then echo "tmp vide"
then echo "-" > tmp
fi
#T echo $Tmp | cut -d '-' -f2 | sed "s/\.\ /%/g" | cut -d '%' -f1
cat tmp >> $htmlline.author
cat $htmlline.author >> $PageScliPwb
cat $htmlline.author
#O Finish the page $PageScliPwb
echo "" >> $PageScliPwb
#O End of as long as there are lines in html.list
done < html.list
#O End of 'if the line is not the name of book'.
fi
#O End of the while loop over $Projectdir/$1.pj
done < $Projectdir/$1.pj
#O Finish the page $PageScliPwb
echo "</div>" >> $PageScliPwb
echo "$(gettext ' {{Newpage}} ')" >> $PageScliPwb
exit 0
# End of en_sclipwb.inc.sh
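As a hedged illustration of the result (the article, file name, license and author below are invented), each image found in an article appends an entry of roughly this shape to the $1.sclipwb page, following the echo statements above:

'''Article : '''Some_article'''<br \>
'''Illustration : '''Example_picture.jpg
'', source : ''https://en.wikibooks.org/w/index.php?title=File:Example_picture.jpg
, ''license : ''cc-by-sa-4.0
, ''author(s) : ''User:SomeContributor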
fr_sclipwb.inc
#! /bin/bash
#H File : fr_sclipwb.inc included in add_sclipwb.sh
Version=210707
cd $Projectdir
if [ "$2" = "--t" ]
then
echo "Projectdir = $Projectdir"
echo "fr_sclipwb version = $Version"
fi
#O Définition de la variable $PageScliPwb
PageScliPwb=$Projectdir/$1.sclipwb
#O Créer une boucle d'identification des répertoires correspondant aux articles
#O du livre.
#O Tant que l'on peut lire les lignes du fichier $Projectdir/$1.pj
while read pjline
do
#O Si la ligne lue n'est pas $1 (nom du livre)
#T echo "ligne lue : " $line
if [ $pjline != $1 ]
#O Alors:
then
#T echo "====="
#T echo ""
#T echo "ligne prise en compte : " $pjline
#O Entrer dans le répertoire de l'article,
cd $Projectdir/$pjline
#T echo -n "répertoire courant : "
#T pwd
#T break
#O Créer les fichiers de documentation des images de la page.
#O Ouvrir le flux $Projectdir/$pjline/$pjline.str de l'article et sélectionner les chaînes de
#O caractères contenant Fichier:, File:, Image:, les copier dans les nouveaux
#O fichiers $Projectdir/$pjline/$pjline.files, .picts, .illustrations, .images, .links
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e Fichier: -e file: -e image: | sed -f $RepCom/$Conversions > $Projectdir/$pjline/$pjline.files
cat $Projectdir/$pjline/$pjline.str | grep -n -i -e fichier: -e .jpg -e .png -e .gif | sed -f $RepCom/$Conversions> $Projectdir/$pjline/$pjline.picts
cat $Projectdir/$pjline/$pjline.files | grep title |sed "s/<\/div>//g" | awk -F">" '{print $NF}' > $Projectdir/$pjline/$pjline.illustrations
#T cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https | sed "s/\"/!/g" | cut -d '!' -f3 | grep https | tr '>' ! | sed "s/<\/a//g" |sed "s/!//g" > $Projectdir/$pjline/$pjline.links
cat $Projectdir/$pjline/$pjline.files | awk -F"=" '{for (i=1;i<=NF;i++) print $i "\n"}' | grep https://fr.wikibooks.org | sed "s/\"/!/g" | cut -d '!' -f2 > $Projectdir/$pjline/$pjline.images
#Tbreak
#O Télécharger les fichiers d'images depuis le serveur wikimedia.
#P Remarque : l'option -N permet d'éviter de télécharger un fichier déjà à jour,
#P sans ajouter une numérotation.
#T #T wget -N -P $Projectdir/$pjline -i $Projectdir/$pjline/$pjline.images
wget -P $Projectdir/$pjline -r -linf -k -p -E -i $Projectdir/$pjline/$pjline.images
#O Copier les pages de description des images téléchargées (HTML) dans le répertoire de l'article courant.
cp $Projectdir/$pjline/fr.wikibooks.org/wiki/*.html $Projectdir/$pjline/.
#O Initialiser le fichier html.doublons avec un texte vide.
echo -n "" > html.doublons
#O Lister les fichiers d'images dans l'ordre d'impression ou de l'affichage,
#O à l'aide de la liste $Projectdir/$pjline/$pjline.images
#O Tant que l'on peut lire des lignes dans $Projectdir/$pjline/$pjline.images
while read pictline
do
#O Couper les lignes au caractère '/', sélectionner le dernier champ et ajouter '.html'
#echo $pictline | awk -F"/" '{for (i=1;i<=NF;i+=2) print $i "\n"}' #| cut -d '%' -f1 | cut -d '.' -f1 > tmp
echo $pictline | awk -F"/" '{ print $NF".html"}' >> html.doublons
#O Supprimer les doublons en ne gardant qu'une ligne sur deux (lignes impaires).
echo -n "" > html.list
awk 'BEGIN { FILENAME }
{memfile [NR] = $0 }
END { for ( i = 1 ; i <= NR ; i=i+2 ) {
print memfile[i] >> "html.list"
}
# print "Fin"
} ' html.doublons
#O Fin tant que l'on peut lire des lignes dans $Projectdir/$pjline/$pjline.images
done < $Projectdir/$pjline/$pjline.images
#T Afficher html.list
#T cat html.list
#O Copier le nom de l'article dans le fichier $1.sclipwb
echo "'''Article : $pjline'''<br \>" >> $PageScliPwb
echo "'''Article : $pjline'''"
#P## Annexe version 'wikibooks' ##############################
#O Tant qu'il y a des liens (locaux) dans le fichier d'images html.list
while read htmlline
do
#O Afficher la ligne lue,
echo ""
echo ""
echo "---- ligne lue = $htmlline ---"
echo ""
#O Sélectionner les chaînes de caractères du fichier image $htmlline et les
#O copier dans $htmlline.str après remplacement du caractère ',' par 'new-line'
mkd -pw '**' $htmlline $htmlline.tmp
cat $htmlline.tmp | tr ',' '\n' > $htmlline.str
#O images,
echo -n "'''Illustration : '''" > $htmlline.title
cat $htmlline.str |grep wgTitle | cut -d '"' -f4 >> $htmlline.title
cat $htmlline.title >> $PageScliPwb
cat $htmlline.title
#O source,
echo -n ", ''source : ''https://"$Site"/w/index.php?title=" > $htmlline.source
echo $htmlline.str | sed "s/.html//g" | sed "s/.str//g" >> $htmlline.source
cat $htmlline.source >> $PageScliPwb
cat $htmlline.source
#O license,
echo -n ", ''licence : ''" > $htmlline.license
cat $htmlline.str | grep licensetpl_short | sed "s/<td>//g" | sed "s/<span class//g" | sed "s/<\/span>//g" | sed "s/style=\"display:none;\"//g" | tr '=' '\n' | grep licensetpl_short | awk -F">" '{print $NF}' >> $htmlline.license
cat $htmlline.license >> $PageScliPwb
cat $htmlline.license
#O auteur(s).
echo -n '' > tmp
echo -n ", ''auteur(s) : ''" > $htmlline.auteur
cat $htmlline.str | grep -i -n -m1 -A 1 -e Author -e Auteur | tr '/' '\n' | grep -i -e user -e utilisateur -e auteur -e author | cut -d '"' -f1 | grep -i -e user -e utilisateur -e auteur -e author > tmp
read Tmp < tmp
#T
echo "Tmp = $Tmp"
if [ "$Tmp" = "" ]
#T then echo "tmp vide"
then echo "-" > tmp
fi
#T echo $Tmp | cut -d '-' -f2 | sed "s/\.\ /%/g" | cut -d '%' -f1
cat tmp >> $htmlline.auteur
cat $htmlline.auteur >> $PageScliPwb
cat $htmlline.auteur
#O Terminer la page $PageScliPwb
echo "" >> $PageScliPwb
#O Fin du tant qu'il y a des lignes
done < html.list
#O Fin du 'si la ligne n'est pas le nom du livre'.
fi
#O Fin du tant que (lecture de $Projectdir/$1.pj)
done < $Projectdir/$1.pj
#O Terminer la page $PageScliPwb
echo "</div>" >> $PageScliPwb
echo "{{Nouvelle page imprimée}}" >> $PageScliPwb
exit 0
# Fin de fr_sclipwb.inc.sh
tests_add_sclipwb.bash
#! /bin/bash
#P file : tests_add_sclipwb.bash
#P Syntax ./tests_add_sclipwb.bash <short-bookname> [ --t ]
VERSION=210707
#O Clean screen
clear
#O Include install variables
pwd > pwd.txt
if cat pwd.txt | grep ~/Annexer; then Workdir=~/Annexer; elif cat pwd.txt | grep ~/Add_appendix; \
then Workdir=~/Add_appendix; else echo "Working directory not found"; exit -1;fi
rm pwd.txt
#O add_sclipwb global variables
source $Workdir/vars/installdir.var
#O Print tests_add_sclipwb.bash version
echo " tests_add_sclipwb.bash version = $VERSION"
sleep 2
#T echo "----"
#O Check that the add_sclipwb.sh file under test is clean
echo -e "\033[1;33m Check add_sclipwb.sh in the local test directory \033[0m" > text-control.txt
echo -e "\033[1;33m Please check that the comment lines do not contain a command \033[0m" >> text-control.txt
echo >> text-control.txt
echo -e "\033[1;33m### add_sclipwb.sh ###\033[0m" >> text-control.txt
grep -n -e "#T\|#O\|#P" add_sclipwb.sh >> text-control.txt
echo >> text-control.txt
echo -e "\033[1;33m###### en_sclipwb.inc.sh ###\033[0m" >> text-control.txt
grep -n -e "#T\|#O\|#P" en_sclipwb.inc.sh >> text-control.txt
echo >> text-control.txt
echo -e "\033[1;33m###### fr_sclipwb.inc.sh ###\033[0m" >> text-control.txt
grep -n -e "#T\|#O\|#P" fr_sclipwb.inc.sh >> text-control.txt
echo -e "\033[1;33m ---- \033[0m" >> text-control.txt
more text-control.txt
echo; echo -e "\033[47m\033[1;30m Continue ? \033[0m"
read -s -e -n 1 -t 60 -p "y/n ? : " Inkey;echo
#T echo "Inkey = $Inkey"
if [ "$Inkey" != 'y' ]; then echo " Inkey is not yes, or time out after 60 sec, exit from $0"; exit 0;fi
#O Test with first param empty
echo "Command ./add_sclipwb.sh without parameter"
./add_sclipwb.sh
echo;echo " wait for 2 sec"
sleep 2; echo "----"
#O Test, all with option --t
echo " All tests whith parameter --t"
#O add_sclipwb local books for tests
Inkey=1
for i in $(seq 1 9)
do
cat $Workdir/tests/sitelist.txt; echo
echo -e "\033[47m\033[1;30m Choose the site number to test \033[0m"
read -s -n1 -p "inkey number 1 to 9 : " Inkey; echo;
if [ "$Inkey" = "1" ]; then ./add_sclipwb.sh LivreTest --t
elif [ "$Inkey" = "2" ]; then ./add_sclipwb.sh Hélices_de_navires_à_déplacement --t
elif [ "$Inkey" = "3" ]; then ./add_sclipwb.sh TestBook --t
elif [ "$Inkey" = "4" ]; then ./add_sclipwb.sh Mkd_\(Extracteur_de_documents\) --t
elif [ "$Inkey" = "5" ]; then ./add_sclipwb.sh Faire_fleurir_le_sel --t
elif [ "$Inkey" = "6" ]; then ./add_sclipwb.sh Faire_sa_fleur_de_sel --t
elif [ "$Inkey" = "7" ]; then ./add_sclipwb.sh Guide_to_Unix --t
elif [ "$Inkey" = "8" ]; then ./add_sclipwb.sh Tests_de_la_capacité_des_batteries_d%27accumulateurs --t
elif [ "$Inkey" = "9" ]; then ./add_sclipwb.sh LivreTest --t
else echo "$inkey local url of site not found"; exit 0
fi
done
#O End tests_add_sclipwb.bash