#!/bin/bash
# This script grabs the files that are created after a scan, processes
# them into MediaWiki format, and then strips out HTML markup that breaks
# the formatting when the report is viewed on the wiki.
# It also takes the name of each file as the name for the individual report.
# NAMING CONVENTION FOR FILES: machine-name/item-name.
# Use hyphens, not spaces, in the name of the scan target.
# The script takes everything before the first underscore in the filename
# as the scan target, and adds that name to the Summary section of the
# report.
# Creation date: 9th September 2011
# Amended: 26th September 2011
# Author: Gordon Thomson
# Version 1.0
# Fixes:
# 4/10/2011 - formatting was askew in the wiki file: sub-headers were given
# the same prominence as the main header for each scan target. The script
# now gives sub-headers the correct prominence.
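#
# Example (hypothetical filename): a report saved as
# "webmail-01_nikto.html" yields the scan target "webmail-01", so its
# section header in the wiki becomes "Summary for webmail-01".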
# get the passed-in report directory
directory=$1
# and the current directory
current_dir=$2
# grab the end location from the params
date_location=$3
# final filename for the wiki page
wiki_name=$4
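# Example invocation (hypothetical paths, none mandated by the script):
#   ./format_report_for_wiki /reports/scans "$(pwd)" /srv/wiki scan_report.wiki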
# grab the date
date_run=$(date +%d_%m_%Y)
year=$(date +%Y)
month=$(date +%B)
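# date_run comes out as day_month_year, e.g. 26_09_2011 for 26 September 2011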
# create the combined output file
touch "/tmp/${date_run}.html"
echo -e "\n"
for files in $(find "$directory" -maxdepth 2 -mindepth 1 -type f)
do
# file -b prints just the file type, without the leading filename
if [ "$(file -b "$files" | grep 'HTML document')" ];
then
# grab the first section of the filename (everything before the '_')
filename=$(basename "$files")
# this gets the extension. Not sure if it copes with .tar.gz though.
extension=${filename##*.}
file_name_ext=${filename%.*}.$extension
name_of_file=$(echo "$file_name_ext" | cut -d'_' -f1)
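# (${filename##*.} strips everything up to the last '.', leaving the
# extension; ${filename%.*} strips the extension itself, so the two
# expansions together just rebuild the original filename.)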
# create a temp file to avoid overwriting the original
cat "$files" > "$files.temp"
# remove the top eight lines (everything above the <h1>Summary</h1> line)
sed -i '1,8d' "$files.temp"
# remove the bottom two lines (and the blank lines between them - five
# lines in all)
head -n -5 "$files.temp" > "$files.temp2"
# replace the summary line, adding in the filename (sed's '1c' changes
# line 1 to the given text)
sed -i -e '1c \<h1>Summary for '"$name_of_file"'</h1>' "$files.temp2"
# TODO: also cater for every other machine/item scanned; the above
# replaces only the first.
# sed -i 's/">Host/">Host '$name_of_file'-/g' $files
# put the temp file into the overall HTML file
cat "$files.temp2" >> "/tmp/${date_run}.html"
# a couple of line breaks to separate content
echo '<br /><br />' >> "/tmp/${date_run}.html"
echo "$files has been processed."
# clean up
rm "$files.temp"
rm "$files.temp2"
continue
# plain-text report files are skipped, as is anything else that is not HTML
elif [ "$(file -b "$files" | grep 'ASCII English text, with very long lines')" ];
then
continue
else
continue
fi
done
echo "Converting to MediaWiki format..."
html2wiki --strip-tags --dialect MediaWiki --no-wrap-in-html --no-escape-entities "/tmp/${date_run}.html" > "/tmp/${date_run}.wiki"
echo "Correcting indentation structure..."
sed -i 's/=Results per Host=/==Results per Host==/g' "/tmp/${date_run}.wiki"
echo "Done."
# remove the intermediate HTML file (the .wiki output is left in /tmp)
rm "/tmp/${date_run}.html"