Commit 9509a338 authored by ale

removed old scripts which have been replaced

parent a252fcca
#!/bin/bash
bindir=/usr/share/ai-offlinescan/
rootdir=${1:-/home/disastro/backup/ai/ring0}
gawk --version > /dev/null 2>&1 || {
echo "Error: you need GNU awk (gawk) to run this script" 1>&2
exit 1
}
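# Pipeline: list the files changed since the last run (NUL-separated paths),
# scan them with clamscan, turn the "FOUND" lines into a JSON report, and
# upload that report to the a/i receiver.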
${bindir}/incremental-virus-scan.sh ${rootdir}/* \
| xargs -0 clamscan --no-summary --stdout --infected \
| ${bindir}/parse-scan-output.py \
| ${bindir}/contrib/ai/upload-scan-results.py
#!/usr/bin/python
#
# Upload the scan results to the a/i servers.
# Use SSO to authenticate to the receiver.
#
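# Illustrative input, as produced by parse-scan-output.py (all values are
# examples only):
#
#   {"host": "scanhost", "stamp": 1400000000,
#    "results": [{"host": "somehost", "user": "alice",
#                 "path": "Maildir/cur/1234",
#                 "virus": "Eicar-Test-Signature",
#                 "local_path": "/backup/.../1234"}]}
#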
import cookielib
import json
import os
import sys
import urllib2
import logging
from sso.urllib2_handler import SSOProcessor
SUBMIT_URL = 'https://www.autistici.org/offlinescan/receiver'
log = logging.getLogger(__name__)
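# The SSO password is read from a file (~/.offlinescan.pw unless SSO_PWFILE
# says otherwise); the file is expected to contain just the password, with
# surrounding whitespace stripped.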
def get_credentials():
    user = os.getenv('SSO_USER', 'offlinescan')
    pwfile = os.path.expanduser(
        os.getenv('SSO_PWFILE', '~/.offlinescan.pw'))
    with open(pwfile, 'r') as fd:
        pw = fd.read().strip()
    return user, pw
def create_opener():
    username, password = get_credentials()
    jar = cookielib.CookieJar()
    return urllib2.build_opener(
        urllib2.HTTPCookieProcessor(jar),
        SSOProcessor(username=username, password=password))
def read_results():
    return json.load(sys.stdin)
def process_results(data):
    log.info('processing data from %s (ts=%d)', data['host'], data['stamp'])
    out = {}
    for result in data['results']:
        out.setdefault(result['user'], []).append({
            'path': result['path'],
            'virus': result['virus'],
            'found_at': data['stamp']})
    return out
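# With the illustrative input above, process_results would return something
# like the following, which is then passed to send_results:
#   {"alice": [{"path": "Maildir/cur/1234",
#               "virus": "Eicar-Test-Signature",
#               "found_at": 1400000000}]}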
def send_results(results):
    try:
        req = urllib2.Request(
            SUBMIT_URL,
            json.dumps(results),
            {'Content-Type': 'application/json'})
        resp = create_opener().open(req)
        # urllib2 responses expose the HTTP status via getcode(), not status_code.
        if resp.getcode() != 200:
            log.error('submit error: HTTP status %d', resp.getcode())
    except Exception as e:
        log.error('submit error: %s', e)
def setup_logging(level=logging.INFO):
    logging.basicConfig(level=level)
if __name__ == '__main__':
    setup_logging()
    send_results(
        process_results(
            read_results()))
#!/bin/bash
#
# Scan a rdiff-backup metadata directory and output the list of files
# that have changed since the last invocation.
#
# The output can be fed to analysis software for incremental scanning.
#
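# Example invocation (run directory and arguments are illustrative):
#
#   incremental-virus-scan.sh -r /var/lib/offlinescan /home/disastro/backup/ai/ring0/*
#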
usage() {
echo "Usage: $0 [-n] [-r <run_dir>] [-x <exclude_pattern>]... <rdiff_root_directory>..." 1>&2
exit 1
}
run_dir=.
dry_run=${DRY_RUN:-n}
excludes=backups/mysql
while getopts hnr:x: flag; do
case $flag in
h)
usage;;
n)
dry_run=y;;
r)
run_dir="$OPTARG";;
x)
excludes="$OPTARG ${excludes}";;
esac
done
shift $(( OPTIND - 1 ))
test $# -lt 1 && usage
incoming_dirs="$@"
# Compute the exclude grep pattern.
exclude_pattern=""
if [ -n "${excludes}" ]; then
for e in ${excludes}; do
exclude_pattern="${exclude_pattern}${exclude_pattern:+|}${e}"
done
exclude_pattern="(${exclude_pattern})"
fi
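# e.g. with excludes="tmp backups/mysql" the resulting pattern is
# "(tmp|backups/mysql)" ("tmp" is only an illustrative extra exclude).

# Resolve a possibly relative path to an absolute one.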
abspath() {
echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
}
# Scan a rdiff-backup metadata directory.
scan_dir() {
local dir="$1"
local stamp_file="$(abspath $2)"
local last_scanned_at=$(test -e ${stamp_file} && cat ${stamp_file})
(cd ${dir} ; \
find . -maxdepth 1 -type f -name 'mirror_metadata*.diff.gz' -print |
sort |
while read filename ; do
local stamp=$(echo ${filename} | sed -e 's/^.*mirror_metadata\.\(.*\)+.*\.diff\.gz$/\1/')
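# e.g. mirror_metadata.2015-01-30T04:05:06+01:00.diff.gz -> 2015-01-30T04:05:06
# (illustrative file name; the timestamp is the one rdiff-backup embeds in it).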
# Only process files which are newer than the last run.
if [ "${stamp}" \> "${last_scanned_at}" ]; then
# Save the new dataset stamp. This only works because the file list
# is sorted.
if [ ${dry_run} = "n" ]; then
echo "${stamp}" > "${stamp_file}"
fi
# Extract file names from the mirror_metadata.
zgrep "^File" "${filename}" | \
(if [ -n "${exclude_pattern}" ]; then
egrep -v "${exclude_pattern}"
else
cat
fi) | \
cut -d' ' -f 2-
fi
done)
}
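# Main loop: for every backup root given on the command line, scan its
# rdiff-backup-data directory and print the changed paths prefixed with the
# backup root, NUL-terminated so they can be piped to "xargs -0".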
for base_dir in ${incoming_dirs} ; do
if [ ! -d ${base_dir}/rdiff-backup-data ]; then
echo "Error: ${base_dir} does not seem to contain a rdiff backup" 1>&2
continue
fi
stamp_file=${run_dir}/$(basename ${base_dir}).stamp
scan_dir ${base_dir}/rdiff-backup-data ${stamp_file} | \
awk -v base_dir=$base_dir 'BEGIN { ORS="\0" } { print base_dir "/" $0 }'
done
#!/usr/bin/python
import json
import optparse
import re
import socket
import sys
import time
ROW_PATTERN = r'^(.*): (.*) FOUND$'
DEFAULT_PATTERN = r'(?P<host>[^/]+)/home/users/investici\.org/(?P<user>[^/]+)/(?P<path>.*)$'
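# Illustrative clamscan output line (path and virus name are examples only):
#
#   /backup/somehost/home/users/investici.org/alice/Maildir/cur/1234: Eicar-Test-Signature FOUND
#
# ROW_PATTERN splits it into path and virus name; DEFAULT_PATTERN then pulls
# host, user and the user-relative path out of the path component.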
# Decode paths to unicode, trying UTF-8 first and falling back to ISO-8859-1.
def _tounicode(s):
    try:
        return unicode(s, 'utf-8')
    except UnicodeDecodeError:
        return unicode(s, 'iso-8859-1')
def extract_records(stream):
    """Split path and message (virus) from the input."""
    row_pattern = re.compile(ROW_PATTERN)
    for line in stream:
        line = _tounicode(line.strip())
        m = row_pattern.match(line)
        if not m:
            continue
        yield m.group(1), m.group(2)
def parse_records(stream, pattern):
    path_pattern = re.compile(pattern)
    for path, msg in stream:
        m = path_pattern.search(path)
        if m:
            out = m.groupdict()
            out['virus'] = msg
            out['local_path'] = path
            yield out
def main():
    parser = optparse.OptionParser()
    parser.add_option('--regexp', default=DEFAULT_PATTERN,
                      help='File-matching pattern.')
    opts, args = parser.parse_args()
    if len(args) != 0:
        parser.error('Too many arguments!')
    results = list(parse_records(extract_records(sys.stdin), opts.regexp))
    virusinfo = {
        'host': socket.gethostname(),
        'stamp': int(time.time()),
        'results': results,
    }
    print json.dumps(virusinfo, indent=4).encode('utf-8')
if __name__ == '__main__':
    main()