Bash Scripts by Chris Heath

f2bstatus | server.backup.sh | dir2name.sh | imgur4sale.sh | progress.sh | newHTML.sh | case.folders.sh | txt2html.sh | scan.network.sh | detect.OS.sh | wp2html | [back to chrisheath.us ⤶]


f2bstatus [back to top]
Show status of all fail2ban jails at once.

#!/bin/bash

JAILS=$(fail2ban-client status | grep "Jail list" | sed -E 's/^[^:]+:[ \t]+//' | sed 's/,//g')
for JAIL in $JAILS
do
  fail2ban-client status "$JAIL"
done
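
To run it from anywhere, the script can be made executable and placed somewhere on your PATH; a minimal install sketch (assuming you saved it as f2bstatus, and noting that fail2ban-client normally needs root):

chmod +x f2bstatus
sudo mv f2bstatus /usr/local/bin/
sudo f2bstatus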


server.backup.sh [back to top]
Compress and archive a desired location for offline backup.

#!/bin/bash
# Author: Chris Heath
# Date: 12/13/2018
# Description/Notes: This script compresses and archives a 
# location that you specify and moves it to a downloadable
# location for offline backup.
backupLocation=BACKUP.PATH.GOES.HERE
backupDownload=PATH.TO.DOWNLOAD.BACKUP
#
dir=$PWD
dt=$(date +%Y-%m-%d)
cd "$backupLocation" || exit 1
sudo tar czf "/tmp/server.backup.$dt.tar.gz" ./*
sudo mv /tmp/server.backup.* "$backupDownload"
cd "$dir"
echo ""
echo " Backup ready for download at: "
echo "[enter your publicly facing url for download here]/server.backup.$dt.tar.gz"
echo ""
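
To run the backup unattended, a hypothetical crontab entry could schedule it nightly; the script path below is an assumption, adjust it to wherever you keep yours:

# run the backup every night at 2:00 AM
0 2 * * * /usr/local/bin/server.backup.sh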


dir2name.sh [back to top]
Parses all directories under PWD and renames files inside each directory to match the directory name (preserving the file extension).

#!/bin/bash
#
# Author: Chris Heath
# Date: Dec, 1 2018
# Description: Parses all directories under PWD and renames files inside each
# directory to match the directory name (preserving the file extension).

for dir in *; do
 if [ -d "$dir" ]; then
  for file in "$dir"/*; do
   if [ ! -d "$file" ]; then
    match=$(basename "${file%.*}")
    echo "$dir"
    echo "$match"
    ext="${file##*.}"
    echo "$ext"
    mv "$dir/$match.$ext" "$dir/$dir.$ext"
   fi
  done
 fi
done
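
Because the renames are destructive, a dry run can be useful first; this sketch only prints the mv commands the script would run, without executing them:

# preview the intended renames without touching any files
for dir in */; do
  dir=${dir%/}
  for file in "$dir"/*; do
    [ -d "$file" ] && continue
    ext="${file##*.}"
    echo mv "$file" "$dir/$dir.$ext"
  done
done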


imgur4sale.sh [back to top]
Upload images to imgur and create an html code snippet for online sales sites like craigslist and ebay.

#!/bin/bash
#
# Author: Chris Heath
# Date: Nov, 20 2018
# Description: Upload images in current directory to imgur and create an html code snippet
# that can be used with online sales sites like craigslist and ebay. Images are resized to
# 640px wide and the originals are kept as "original.filename.ext".
#
# Adapted from: https://github.com/tremby/imgur.sh
#
# API key NOT provided:
# replace XXXXXXX with your own, or specify yours
# as the IMGUR_CLIENT_ID environment variable
# to avoid rate limits
#
# Declare variables and constants
default_client_id=XXXXXXXXXXXXXXXX
client_id="${IMGUR_CLIENT_ID:=$default_client_id}"
index=0
declare -a imgur
TITLE=${PWD##*/}
declare -a name
clip=""
errors=false
# If the manifest file exists then make a backup
if [ -e forsale.txt ]; then
 mv forsale.txt forsale.txt.bak
 touch forsale.txt
else
 touch forsale.txt
fi
# Function to output usage instructions
function usage {
    echo "###########################################################################################"
    echo "#"
    echo "#  Usage: $(basename $0) [filename|URL [...]]"
    echo "#"
    echo "#  Upload images to imgur and output their new URLs to stdout. Each one's"
    echo "#  delete page is output to stderr between the view URLs."
    echo "#"
    echo "#  When no filename is given, parse the current folder for images."
#   echo "#  A filename can be - to read from stdin. If no filename is given, stdin is read."
    echo "#"
    echo "#  Also, create two text files (forsale.html and forsale.txt)."
    echo "#  forsale.html ... code snippet ready for online selling sites."
    echo "#  forsale.txt .... manifest file containing image and delete links."
    echo "#"
    echo "#  If xsel, xclip, or pbcopy is available, the URLs are put on the X selection for"
    echo "#  easy pasting."
    echo "#"
}
# Function to upload a path
# First argument should be a content spec understood by curl's -F option
function upload {
	curl -s -H "Authorization: Client-ID $client_id" -H "Expect: " -F "image=$1" https://api.imgur.com/3/image.xml
	# The "Expect: " header is to get around a problem when using this through
	# the Squid proxy. Not sure if it's a Squid bug or what.
}
# Check curl is available
type curl &>/dev/null || {
	echo "Couldn't find curl, which is required." >&2
	exit 17
}
# Check arguments
if [ "$1" == "-h" -o "$1" == "--help" ]; then
	usage
	exit 0
elif [ $# -eq 0 ]; then
#	echo "No file specified; reading from stdin" >&2
#	exec "$0" -
   echo "No file specified. Parsing current folder for images."
   # Parse the current folder and create a temp list of image filenames
   touch test.tmp
   {
    find . -maxdepth 1 -name '*' -exec file {} \; | grep -o -P '^.+: \w+ image' | cut -d':' -f1 | cut -c 3-
   } > test.tmp
   # Read in the filenames to an array.
   while read -r imgName; do
    cp "$imgName" "original.$imgName"
    mogrify -resize 640x "$imgName"
    name[$index]="$imgName"
    ((index++))
   done < test.tmp
   # Cycle through the array and upload to imgur.
   for (( i=0; i<index; i++ )); do
        file=${name[$i]}
        response=$(upload "@$file") 2>/dev/null
        # Parse the response and output our stuff
        url="${response##*<link>}"
        url="${url%%</link>*}"
        delete_hash="${response##*<deletehash>}"
        delete_hash="${delete_hash%%</deletehash>*}"
        echo $url | sed 's/^http:/https:/'
        echo "Delete page: https://imgur.com/delete/$delete_hash" >&2
        {
         echo "[ " $file " ]"
         echo "  "$url | sed 's/^http:/https:/'
         echo "  Delete page: https://imgur.com/delete/$delete_hash"
         echo ""
        } >> forsale.txt
        # Create URL array.
        imgur[$i]="$url"
        # Append the URL to a string so we can put them all on the clipboard later
        clip+="$url"
        if (( i < index - 1 )); then
                clip+=$'\n'
        fi
   done
   rm test.tmp #remove the temp file
fi
# Loop through arguments
while [ $# -gt 0 ]; do
	file="$1"
        cp "$file" "original.$file"
        mogrify -resize 640x "$file"
	shift
	# Upload the image
	if [[ "$file" =~ ^https?:// ]]; then
		# URL -> imgur
		response=$(upload "$file") 2>/dev/null
	else
		# File -> imgur
		# Check file exists
		if [ "$file" != "-" -a ! -f "$file" ]; then
			echo "File '$file' doesn't exist; skipping" >&2
			errors=true
			continue
		fi
		response=$(upload "@$file") 2>/dev/null
	fi
	if [ $? -ne 0 ]; then
		echo "Upload failed" >&2
		errors=true
		continue
	elif echo "$response" | grep -q 'success="0"'; then
		echo "Error message from imgur:" >&2
		msg="${response##*<error>}"
		echo "${msg%%</error>*}" >&2
		errors=true
		continue
	fi
	# Parse the response and output our stuff
	url="${response##*<link>}"
	url="${url%%</link>*}"
	delete_hash="${response##*<deletehash>}"
	delete_hash="${delete_hash%%</deletehash>*}"
	echo $url | sed 's/^http:/https:/'
	echo "Delete page: https://imgur.com/delete/$delete_hash" >&2
        # Create URL array.
        imgur[$index]="$url"
        ((index++))
        {
         echo "[ " $file " ]"
         echo "  "$url | sed 's/^http:/https:/'
         echo "  Delete page: https://imgur.com/delete/$delete_hash"
        } >> forsale.txt
	# Append the URL to a string so we can put them all on the clipboard later
	clip+="$url"
	if [ $# -gt 0 ]; then
		clip+=$'\n'
	fi
done
 {
echo "<style>"
echo "   div.main {"
echo "   margin:15px 10% 15px 10%;"
echo "   padding: 5px;"
echo "   background-color: gray;"
#echo "   background-image: url(\"../background.jpg\");"
echo "  }"
echo "  div.title {"
echo "   margin: 30px 20% 15px 20%;"
echo "   background: white;"
echo "  }"
echo "  div.desc {"
echo "   margin: 15px 10% 15px 10%;"
echo "   background: white;"
echo "  }"
echo "  p.title {"
echo "   text-align: center;"
echo "   font-size: larger;"
echo "   padding: 5px;"
echo "  }"
echo "  div.pix p {"
echo "   text-align: center;"
echo "  }"
echo "  div.desc p {"
echo "   text-align: center;"
echo "   padding: 5px;"
echo "}"
echo "</style>"
echo ""
echo "<div class=\"main\">"
echo " <div class=\"title\">"
echo "  <p class=\"title\">"$TITLE"</p>"
echo " </div>"
echo " <div class=\"desc\">"
echo "  <p> description</p>"
echo " </div>"
echo " <div class=\"pix\">"
 } > forsale.html
 for (( i=0; i<index; i++ )); do
   echo "<p><img src=\"${imgur[$i]}\"></p>" >> forsale.html
 done
 {
 echo " </div>"
 echo "</div>"
 } >> forsale.html
# Put the URLs on the clipboard if we can
if type pbcopy &>/dev/null; then
	echo -n "$clip" | pbcopy
elif [ $DISPLAY ]; then
	if type xsel &>/dev/null; then
		echo -n "$clip" | xsel -i
	elif type xclip &>/dev/null; then
		echo -n "$clip" | xclip
	else
		echo "Haven't copied to the clipboard: no xsel or xclip" >&2
	fi
else
	echo "Haven't copied to the clipboard: no \$DISPLAY or pbcopy" >&2
        echo $clip
fi
if $errors; then
	exit 1
fi
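
Rather than editing the default_client_id line, the client ID can be supplied per run through the IMGUR_CLIENT_ID environment variable the script already checks; the value below is a placeholder, not a real key:

IMGUR_CLIENT_ID=your_client_id_here ./imgur4sale.sh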


progress.sh [back to top]
Displays an 'in-progress' animation while waiting for a command to complete.

#!/bin/bash
#
# Author: Chris Heath
# Date: 12/9/2018
# Description: A progress meter for bash commands.
# Usage: "progress.sh [COMMAND]" 
#        All text after the script name is treated as a
#        new bash command and an in-progress bar is
#        displayed until the command exits.
spinner=( [X---------]
          [-X--------]
          [--X-------]
          [---X------]
          [----X-----]
          [-----X----]
          [------X---]
          [-------X--]
          [--------X-]
          [---------X]
          [--------X-]
          [-------X--]
          [------X---]
          [-----X----]
          [----X-----]
          [---X------]
          [--X-------]
          [-X--------]);

if [ $# -gt 0 ]; then
 for i in "$@"; do
  CMD+=" $i"
 done
 echo "Please wait while$CMD finishes..."
 eval $CMD & PID=$!
else
 sleep 10 & PID=$!
 echo "No command passed.  Running with 10 sec. sleep timer."
fi
start=$SECONDS
while kill -0 $PID 2> /dev/null; do
    for i in ${spinner[@]};
    do
      duration=$(( SECONDS - start ))
      echo -ne "\r$i ${duration}s";
      sleep 0.1;
    done;
done
printf "\ndone!\n"
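
A usage example, assuming the script is executable in the current directory; everything after the script name is run as the wrapped command:

./progress.sh tar czf /tmp/www.backup.tar.gz /var/www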


newHTML.sh [back to top]
Create a simple html file. Ask the user for filename and title.

#!/bin/bash
#
# Author: Chris Heath
# Date: Nov. 18, 2018
# Description: Create a simple html file. Ask the user for filename and title.
#

echo -n "Enter filename: "
read -r FILE

echo -n "Enter webpage title: "
read -r TITLE

{
echo "<html>"
echo " <head>"
echo "  <title> $TITLE </title>"
echo " </head>"
echo " <body>"
echo "  "
echo " </body>"
echo "</html>"
} > "$FILE"

echo "Created: $FILE"
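
As a design note, the same template could be written with a single here-document instead of a series of echo statements; a minimal sketch of that alternative:

cat > "$FILE" <<EOF
<html>
 <head>
  <title> $TITLE </title>
 </head>
 <body>

 </body>
</html>
EOF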


case.folders.sh [back to top]
Renames files and folders in the current directory by capitalizing the first letter of each word, then moves each loose file into a self-titled directory.

#!/bin/bash
#
# Author: Chris Heath
# Date: Feb. 25, 2018
# Description: Rename all files and folders in  the current directory
#              by capitalizing the first letter of each word in the
#              file or directory name, and then move each file not in
#              a directory into a self-titled directory.
#              Can be modified slightly fairly easily for specific uses.

rename 's/\b(\w)\B/\u$1/g' *

for file in *; do
  if [[ -f "$file" ]]; then
    mkdir -p "${file%.*}"
    mv "$file" "${file%.*}"
  fi
done
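
The rename used above is the Perl version (common on Debian/Ubuntu). On systems that ship the util-linux rename instead, a rough equivalent of the capitalization step can be sketched with GNU sed (assumed available):

# capitalize the first letter of each word in every name
for f in *; do
  new=$(echo "$f" | sed 's/\b\(\w\)/\u\1/g')
  [ "$f" != "$new" ] && mv -n "$f" "$new"
done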


txt2html.sh [back to top]
Converts text files to html files by adding br tags to the end of each line.

#!/bin/bash
#
# Author: Chris Heath
# Date: Nov. 17, 2018
# Description: Take text and add html tags
# Usage: txt2html.sh <file1> [file2]...

function usage {
    echo "USAGE: $(basename $0) file1 file2 ..."
    exit 1
}

if [ $# -eq 0 ] ; then
    usage
fi

for file in "$@" ; do
   html=$(echo "$file" | sed 's/\.txt$/\.html/i')

   echo "<html>" > "$html"
   echo "   <body>" >> "$html"

   while IFS= read -r line ; do
      echo "$line<br />" >> "$html"
   done < "$file"

   echo "   </body>" >> "$html"
   echo "</html>" >> "$html"
done
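
Example invocation, converting every .txt file in the current directory (each file gets a matching .html written next to it):

./txt2html.sh *.txt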


scan.network.sh [back to top]
Uses nmap to either do a quick ping-scan or a deeper port-scan.

#!/bin/bash
#
# Author: Chris Heath
# Description: Uses nmap to either do a quick ping-scan or a deeper port-scan.


read -p "Enter the IP (or IP range) to scan: " host

read -p "Do you want a deeper port scan? (y/n): " -n 1 -r

echo

if [[ $REPLY =~ ^[Yy]$ ]]
then
 nmap -v -Pn "$host"
else
 nmap -sn "$host"
fi
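
For reference, the two branches correspond to these nmap invocations; the addresses are only examples, substitute your own host or CIDR range:

nmap -sn 192.168.1.0/24     # quick ping scan of a subnet
nmap -v -Pn 192.168.1.10    # deeper port scan of a single host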


detect.OS.sh [back to top]
Uses nmap to try to detect the operating system in use.

#!/bin/bash
#
# Author: Chris Heath
# Description: Scan host with nmap OS detection


read -p "enter ip or hostname to scan for OS type: " host

nmap -A -v "$host"
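
OS fingerprinting relies on raw sockets, so in practice the scan is usually run as root; an example with a placeholder address:

sudo nmap -A -v 192.168.1.10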


wp2html [back to top]
Extract post data from a WordPress database and create a very basic html page for each post, as well as an index page.

#!/bin/bash
#
# Author: Chris Heath
# Description: This script reads data from the 'crunch' MySQL database. The 'crunch' database
# is a backup of an old WordPress blog of mine, and this script harvests the relevant fields
# from the 'posts' table and creates an html page for each post. An archive page is also
# created with a list of all the posts, to be used as a homepage for the archive. Each post's
# page has links to the next and previous posts as well as back to the archive's homepage.

# DEBUG: Test the database connection and check how many rows/records/posts we will be dealing with
# rows=$(mysql --login-path=local -D crunch -se "SELECT COUNT(ID) FROM wp_posts WHERE post_parent=0;")
# echo $rows

# Gather the first set of data and start creating each webpage
# (We cannot gather all the data at once with this method because fields that contain spaces
# have to be read in last, so we can only read one space-containing field at a time.)
mysql --login-path=local -D crunch -se "SELECT ID, post_title FROM wp_posts WHERE post_parent=0;" | while read idx post_title; do
{
echo "
<html>
 <head>
  <title> $post_title </title>"
} > $idx.html
done

# Declare index counter variable and date array
index=0
declare -a date

# Gather the next set of data this time using file redirection because the structure used last time (with a pipe) creates a subshell and our variables don't survive the pipe
mysql --login-path=local -D crunch -se "SELECT ID, post_date FROM wp_posts WHERE post_parent=0;" > date_time.tmp

# Send the temp file back through STDIN to be read
exec 0<date_time.tmp

# Create the next section of the webpage with the data we just read
# (Note: The post_date field has a space between the date and the time so we need to only use the first 10 characters which contain the date.)
while read idx post_date; do
{
echo " </head>
 <body>
  <p>Originally posted on: ${post_date:0:10} </p>"
} >> $idx.html

# Add the date to our date array and increment the counter
date[$index]=${post_date:0:10}
((index++))
done

# Create the next section of the webpage (This could probably be condensed with one of the other sections because guid does not contain spaces)
mysql --login-path=local -D crunch -se "SELECT ID, guid FROM wp_posts WHERE post_parent=0;" | while read idx guid; do
{
echo "  <p>Original location: $guid </p>
  <hr />
  <div>"
} >> $idx.html
done

# Create the main post content section of the webpage
mysql --login-path=local -D crunch -se "SELECT ID, comment_count, post_content FROM wp_posts WHERE post_parent=0;" | while read idx comment_count post_content; do
{
echo "  <p>$post_content</p>
  </div>
  <hr />
  <p> $comment_count comments (not shown)</p>"
} >> $idx.html
done

# Declare name, title, and key arrays.  Also reset the index counter.
index=0
declare -a name
declare -a title
declare -a key

# Use file redirection again to gather the next set of data since our arrays and variables won't survive a pipe
mysql --login-path=local -D crunch -se "SELECT ID, post_name, post_title FROM wp_posts WHERE post_parent=0;" > title_name.tmp

# Send the temp file back through STDIN to be read
exec 0<title_name.tmp

# Fill our arrays with the data we just read from the database so we can create the footer of the webpage (with links to next/previous posts)
while read idx post_name post_title; do
    name[$index]="$post_name.html"
    title[$index]=$post_title
    key[$index]=$idx
    rows=$index
    ((index+=1))
done

# DEBUG: echo out the last idx
# echo ${key[$rows]}

# Reset the counter again
index=0

# Iterate through the arrays once to create the footer for each webpage
while [ $index -le $rows ]; do
{
echo "  <hr />"

# If not the first page (there is no previous page to the first page)
if [ $index -gt 0 ]; then
echo "  <p>Previous Post: <a href=\"" ${name[$index-1]} "\">" ${title[$index-1]} "</a></p>"
fi
# If not the last page (there is no next page after the last page)
if [ $index -lt $rows ]; then
echo "  <p>Next Post: <a href=\"" ${name[$index+1]} "\">" ${title[$index+1]} "</a></p>"
fi
# Also add a link back to the main archive index homepage
echo "  <p>Back to <a href=\"index.html\">archive index</a></p>
  <hr /><br /><br />
 </body>
</html>"
} >> ${key[$index]}.html

# Create new 'proper' name for each page then delete the idx numbered html file, and don't forget to increment the counter
cat "${key[$index]}.html" > "${name[$index]}"
rm "${key[$index]}.html"
((index+=1))
done

# Now that all the webpages have been created let's create the archive's index/homepage

# Reset the counter
index=0

# Begin creating a new webpage
{
echo "<html>
 <head>
  <title> Heathbar's Crunch Blog Archive Index </title>
 </head>
 <body>
  <h2> Heathbar's Crunch Blog Archive Index </h2>
  <hr />
  <ul>"
# Iterate through our arrays one more time, creating our list of pages
while [ $index -le $rows ]; do
    echo "   <li><a href=\""${name[$index]}"\">"${title[$index]}"</a> - "${date[$index]}"</li>"
    ((index++))
done
echo "  </ul>
 </body>
</html>"
} > index.html
# Archive index/homepage created

# Remove temporary files
rm date_time.tmp
rm title_name.tmp
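
The script assumes a saved MySQL login path named local; one way to create it is with mysql_config_editor (the user name below is a placeholder, and you will be prompted for the password):

mysql_config_editor set --login-path=local --host=localhost --user=wpuser --password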


[back to chrisheath.us ⤶]