#!/bin/bash
# manga.sh — download a manga chapter and pack it into a .cbz archive.
# 1. Getting non-generatable pages i.e. 05-06, cover, credit
# 2. Getting Episode Title
# 3. Renaming and compression to cbz
#
# Usage: ./manga.sh [URL]   (URL of the chapter's landing page)

# Require the chapter URL as the first argument.
if [ -z "$1" ]; then
  # NOTE: the original string literal was unterminated and swallowed the
  # exit/fi; closed here. Exit non-zero since a missing argument is an error.
  echo -e "\
manga.sh: missing URL
Usage: ./manga.sh [URL]"
  exit 1
fi
# extracting info from the URL — assumes the layout
# http://www.<site>.tld/<Series_Name>/<chapter>/...:
#   $no      = chapter number   (5th '/'-separated field)
#   $initial = first letter of each '_'-separated word of the series name
#   $site    = site name        (2nd '.'-separated field)
no=$(echo "$1" | cut -d'/' -f5)
initial=$(echo "$1" | cut -d '/' -f4 | tr -s '_' '\n' | cut -c 1)
# $initial is intentionally unquoted: word-splitting collapses the newlines
# to spaces, and the sed strips those spaces, joining the letters together.
initial=$(echo $initial | sed -n 's/ //pg')
site=$(echo "$1" | cut -d '.' -f2)
echo "$site"
# Work inside a per-chapter scratch directory; abort if we can't enter it.
mkdir -p "$initial.$no"
cd "$initial.$no" || exit 1
# getting episode title — cache the chapter landing page as 'startpage'
# so re-runs don't refetch it.
if [ ! -e startpage ]; then
  wget "$1" -O "startpage"
fi
# Scrape the human-readable chapter title out of the landing page markup.
title=$(sed -n 's/.*Chapter Title: \(.*\)<.*/\1/p' startpage)
mkdir -p "$no.$title"
# getting first-page: the relative path behind the "Begin" link, used both
# to fetch the first reader page and (later) to locate the image URL prefix.
page0=$(sed -n "s/.*href=.*$no\/\(.*\)\">.*Begin.*/\1/p" startpage)
if [ ! -e firstpage ]; then
  wget "$1$page0" -O firstpage
fi
# getting initial list of page numbers from the reader's <select> element.
# onemanga's markup needs a different extraction than the generic case.
# $site is quoted so the test doesn't break if extraction produced nothing.
if [ "$site" = "onemanga" ]; then
  list=$(sed -n "/id_page_select/,/select>/p" firstpage | tr -s "<>" "\n" | sed '4~4!d')
else
  list=$(sed -n "/id_page_select/,/select>/p" firstpage | sed "s/.*<.*>//p")
fi
echo $list
if [ -z "$list" ]; then
  # Scrape failure is an error — report it and exit non-zero.
  echo "Couldn't retrieve pages"
  exit 1
fi
# getting link: strip slashes from the first-page path, then pull the image
# URL prefix from the manga-page <img src="...">, minus "<page>.jpg".
page0=$(echo "$page0" | tr -d '/')
link=$(sed -n "s/.*manga-page.*src=\"\(.*\)$page0.jpg.*/\1/p" firstpage)
if [ -z "$link" ]; then
  # Without the prefix no page can be downloaded — exit non-zero.
  echo "Couldn't retrieve link"
  exit 1
fi
# downloading: fetch every page image into the chapter directory, naming
# them "<initial>.<chapter>x<page>.jpg". The original had a stray 'fi' with
# no matching 'if' (syntax error) — the clearly intended per-download
# failure check is restored here: abort on the first failed wget.
for i in $list; do
  if ! wget -c "$link$i.jpg" -O "$no.$title/$initial.${no}x$i.jpg"; then
    echo "Problem downloading file"
    exit 1
  fi
done
# compressing: pack the chapter directory into a .cbz (zip) next to the
# script, then delete the scratch directory. Guard each step so a failed
# zip or cd never leads to deleting the only copy of the downloads.
if ! zip -r "$no.$title.cbz" "$no.$title"; then
  echo "Couldn't create archive"
  exit 1
fi
mv "$no.$title.cbz" "../$no.$title.cbz"
cd .. || exit 1
rm -r "$initial.$no"