--- /dev/null
+# Housekeeping targets for the per-client working directories.
+.PHONY: clean check
+
+CLIENTS = aria transmission mainline bittornado azureus tribler hrktorrent
+
+# Remove downloads, results and logs for every client; depends on
+# `check` so the directories exist before the globs run.
+clean: check
+	for i in $(CLIENTS); do \
+		rm -fr clients/$$i/dld/* clients/$$i/res/* clients/$$i/log/*; \
+	done
+
+# Ensure each client's working directories exist. `mkdir -p` succeeds on
+# existing directories, so the old `2> /dev/null ... true` hack (which
+# also hid genuine failures such as permission errors) is unnecessary.
+check:
+	for i in $(CLIENTS); do \
+		mkdir -p clients/$$i/dld clients/$$i/res clients/$$i/log; \
+	done
--- /dev/null
+clients/
+ - bittorrent clients
+	- each client directory contains:
+ - run.bash (running script)
+ - config.bash (configure script)
+ - dld/ (download directory)
+ - log/ (logging directory)
+ - res/ (results)
+
+meta/
+ - bittorrent metafiles (.torrent files)
+
+run.bash (running script)
+config.bash (configure script)
+add_client.bash (add new client - create directory tree in clients/)
+track.bash (run tracker)
--- /dev/null
+#!/bin/bash
+
+# Create the directory tree for a new client under $CLIENTS_DIR:
+# empty run/config scripts plus dld/, log/ and res/ directories.
+
+if test $# -ne 1; then
+	echo "Usage: $0 client_name"
+	exit 1
+fi
+
+source config.bash
+CLIENT_NAME=$1
+
+# guard the mkdir calls below against an unset base directory
+if test -z "$CLIENTS_DIR"; then
+	echo "error: CLIENTS_DIR not set by config.bash" 1>&2
+	exit 1
+fi
+
+# -p and quoting make this idempotent and safe for names with spaces
+mkdir -p "$CLIENTS_DIR/$CLIENT_NAME"
+touch "$CLIENTS_DIR/$CLIENT_NAME"/run.bash
+touch "$CLIENTS_DIR/$CLIENT_NAME"/config.bash
+mkdir -p "$CLIENTS_DIR/$CLIENT_NAME"/dld
+# keep download artifacts out of version control
+echo '*' > "$CLIENTS_DIR/$CLIENT_NAME"/dld/.gitignore
+mkdir -p "$CLIENTS_DIR/$CLIENT_NAME"/log
+mkdir -p "$CLIENTS_DIR/$CLIENT_NAME"/res
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+# Force-kill every running process whose command line mentions /home/p2p
+# (the test clients). 'grep -v grep' drops the pipeline's own grep from
+# the pid list; output is silenced because kill may get an empty list.
+kill -9 $(ps -ef | grep p2p | grep '/home/p2p' | grep -v 'grep' | awk '{print $2}') &> /dev/null
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+# Clean downloaded payloads from $DLD_DIR and wipe the per-client state
+# directories each client leaves in the home directory, between runs.
+
+BASE_DIR=/home/p2p/p2p-svn-repo/trunk/bt-prof/
+
+# provides DLD_DIR
+source $BASE_DIR/config.bash
+
+pushd .
+cd $DLD_DIR
+
+# remove payload dirs by name; errors (e.g. find descending into a dir
+# it just removed) are silenced
+find -name '*Tropic*' -type d -exec rm -fr {} \; &> /dev/null
+find -name '*ubuntu*' -type d -exec rm -fr {} \; &> /dev/null
+find -name '*Ubuntu*' -type d -exec rm -fr {} \; &> /dev/null
+
+popd
+
+# per-client state/config directories in the tester's home
+rm -fr ~/.bittorrent/incomplete/*
+rm -fr ~/.bittorrent/console/*
+rm -fr ~/.bittorrent/mutex/*
+rm -fr ~/.Tribler
+rm -fr ~/.hrktorrent
+rm -fr ~/TriblerDownloads
+rm -fr ~/.azureus
+rm -fr ~/Azureus\ Downloads
+rm -fr ~/BitTorrent\ Downloads
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+# Remove everything inside each client's download and log directories
+# (depth 2 = the entries under /home/p2p/p2p-{dld,log}/<client>/), then
+# wipe the clients' home-directory state between experiment runs.
+find /home/p2p/p2p-dld -mindepth 2 -maxdepth 2 -exec rm -fr {} \;
+find /home/p2p/p2p-log -mindepth 2 -maxdepth 2 -exec rm -fr {} \;
+
+rm -fr ~/.bittorrent/incomplete/*
+rm -fr ~/.bittorrent/console/*
+rm -fr ~/.bittorrent/mutex/*
+rm -fr ~/.Tribler
+rm -fr ~/.hrktorrent
+rm -fr ~/TriblerDownloads
+rm -fr ~/.azureus
+rm -fr ~/Azureus\ Downloads
+rm -fr ~/BitTorrent\ Downloads
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+CLIENT_ID=aria
+DLD_END_STRING="SEEDING"
+COMMAND=/home/p2p/p2p-clients/aria2c-1.0.0/src/aria2c
+
+function run_client()
+{
+ _METAFILE=$1
+ _LOCAL_DLD_DIR=$2
+ _LOG_FILE=$3
+ _VERBOSE_FILE=$4
+
+ $COMMAND --log=${_VERBOSE_LOG_FILE} --log-level=debug --dir=${_LOCAL_DLD_DIR} --torrent-file=${_METAFILE} &> ${_LOG_FILE} &
+}
--- /dev/null
+#!/bin/bash
+
+# Convert an aria2 log into "epoch<TAB>size" lines; assumes one 'SIZE'
+# progress line per second (secs advances once per matched line) —
+# TODO confirm aria2 prints status at 1 s intervals in this setup.
+if test $# -ne 2; then
+	echo "Usage: $0 aria_log_file start_time[epoch format]"
+	exit 1
+fi
+
+LOG_FILE=$1
+START_DATE=$2
+
+# FS splits on spaces, ':', '/', '(' and ')' so $5 is the size field
+grep 'SIZE' ${LOG_FILE} | awk -v start_date=${START_DATE} '
+BEGIN {
+	FS = "[ :/\(\)]+"
+	secs = start_date
+}
+
+{
+	secs++
+	print secs "\t" $5
+}'
--- /dev/null
+#!/bin/bash
+
+# Drive one aria2 download session: start the client, poll its log for
+# the completion marker, record the elapsed time and (optionally)
+# parse/upload progress for monitoring.
+#
+# Arguments: global_config_file metafile kill_after_dld use_monitoring
+if test $# -ne 4; then
+	echo "Usage: $0 global_config_file metafile kill_after_dld use_monitoring"
+	exit 1
+fi
+
+METAFILE=$2
+KILL_AFTER_DLD=$3
+USE_MONITORING=$4
+
+# load global config file (RUN/LOG/DLD/RES dirs, upload settings)
+source $1
+
+# load local config file (CLIENT_ID, DLD_END_STRING, run_client)
+source ./config.bash
+
+LOCAL_RUN_DIR=${RUN_DIR}/${CLIENT_ID}
+LOCAL_LOG_DIR=${LOG_DIR}/${CLIENT_ID}
+LOCAL_DLD_DIR=${DLD_DIR}/${CLIENT_ID}
+LOCAL_RES_DIR=${RES_DIR}/${CLIENT_ID}
+
+START_TIME=$(date +%F.%T)
+START_TIME_EPOCH=$(date +%s)
+
+SESSION_NAME="${CLIENT_ID}_$(basename ${METAFILE})_${START_TIME}"
+LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}.log
+VERBOSE_LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}_verbose.log
+TIME_FILE="${LOCAL_LOG_DIR}/${SESSION_NAME}.time"
+RES_FILE="${LOCAL_RES_DIR}/${SESSION_NAME}.rate"
+
+UPLOAD_SCRIPT=${LOCAL_RUN_DIR}/upload.bash
+PARSE_SCRIPT=${LOCAL_RUN_DIR}/parse.bash
+MON_SCRIPT=${LOCAL_RUN_DIR}/start_mon.bash
+
+# parse the log into a rate file and upload it (upload runs detached)
+function do_parse_upload()
+{
+	bash ${PARSE_SCRIPT} ${LOG_FILE} ${START_TIME_EPOCH} > ${RES_FILE}
+	bash ${UPLOAD_SCRIPT} ${RES_FILE} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} &
+}
+
+# call run_client function; CLIENT_PID stores client process' pid
+run_client ${METAFILE} ${LOCAL_DLD_DIR} ${LOG_FILE} ${VERBOSE_LOG_FILE}
+CLIENT_PID=$!
+
+secs=0
+while true; do
+	tail "${LOG_FILE}" | grep "${DLD_END_STRING}" &> /dev/null
+	if test $? -eq 0; then
+		echo "${CLIENT_ID} time: $(ps -p ${CLIENT_PID} -o etime --no-headers)" > ${TIME_FILE}
+		if test "${KILL_AFTER_DLD}" == "yes"; then
+			kill -9 ${CLIENT_PID}
+		fi
+		break
+	fi
+	sleep 1
+
+	if test "${USE_MONITORING}" == "yes"; then
+		# Bug fix: '(($secs++))' expanded to '((0++))', an arithmetic
+		# error, so secs never advanced and do_parse_upload ran every
+		# second instead of once a minute.
+		secs=$((secs + 1))
+		# do a parse on the log file each minute and upload it
+		if test $((secs % 60)) -eq 0; then
+			do_parse_upload
+		fi
+
+		# restart monitoring every 4 minutes
+		if test $secs -eq 240; then
+			bash ${MON_SCRIPT} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} $(basename ${RES_FILE})
+		fi
+	fi
+done
+
+# upload the final parse once the download has finished
+if test "${USE_MONITORING}" == "yes"; then
+	do_parse_upload
+fi
+
+# do an analysis on output file
+# TODO
+#!/bin/bash
+
+# Start the remote ApMon monitoring helper for the aria client over ssh.
+# nohup + background keeps it alive after the ssh session ends.
+if test $# -ne 4; then
+	echo "Usage: $0 ip_address username remote_dir mon_file"
+	exit 1
+fi
+
+IP_ADDRESS=$1
+USR=$2
+RDIR=$3
+MON_FILE=$4
+
+ssh ${USR}@${IP_ADDRESS} "nohup python ~/apmon_use/mon-apmon.py ${RDIR}/${MON_FILE} aria &> out_aria.txt &"
--- /dev/null
+#!/bin/bash
+
+# Copy a result file to a remote host via scp.
+if test $# -ne 4; then
+	echo "Usage: $0 file ip_address username remote_dir"
+	exit 1
+fi
+
+FILE=$1
+IP_ADDRESS=$2
+USR=$3
+RDIR=$4
+
+# quote expansions so paths with spaces survive word splitting
+scp "${FILE}" "${USR}@${IP_ADDRESS}:${RDIR}"
--- /dev/null
+#!/bin/bash
+
+# Feed commands to the Azureus console UI on stdout: configure save
+# path and logging, add the torrent, then print "sh t" (show torrents)
+# once a second so progress keeps appearing in the piped log.
+
+METAFILE=$1
+LOCAL_DLD_DIR=$2
+LOG_FILE=$3
+VERBOSE_FILE=$4
+
+# one-time setup commands for the console UI
+cat <<END
+log off
+sh t
+r all
+set "Default save path" "${LOCAL_DLD_DIR}" string
+set "Use default data dir" true boolean
+set "Logger.Enabled" true boolean
+set "Logging Enable" true boolean
+set "Logging Dir" "/home/p2p/p2p-log/azureus/" string
+log on "Start/Stop Rules"
+log on "Core Updater"
+log on "Download Remove Rules"
+log on "External Seed"
+log on "LAN Peer Finder"
+log -f ${VERBOSE_FILE} on
+add ${METAFILE}
+END
+
+sleep 5
+while true; do
+	echo "sh t"
+	sleep 1
+done
+
+# never reached: the loop runs until the pipe consumer (Azureus) dies
+exit 0
--- /dev/null
+#!/bin/bash
+
+# Per-client settings for the Azureus (Vuze) console client.
+CLIENT_ID=azureus
+# line that marks download completion in the console output
+DLD_END_STRING="Finished"
+
+COMMAND=/home/p2p/p2p-clients/vuze/Azureus2.jar
+
+# Start Azureus in console mode, driven by azr_helper.bash over stdin;
+# $! in the caller is the java process (last element of the pipeline).
+# $1 = metafile, $2 = download dir, $3 = console log, $4 = verbose log
+function run_client()
+{
+	_METAFILE=$1
+	_LOCAL_DLD_DIR=$2
+	_LOG_FILE=$3
+	_VERBOSE_FILE=$4
+
+	bash ${LOCAL_RUN_DIR}/azr_helper.bash ${_METAFILE} ${_LOCAL_DLD_DIR} ${_LOG_FILE} ${_VERBOSE_FILE} | java -jar ${COMMAND} --ui=console > ${_LOG_FILE} 2> /home/p2p/p2p-log/azureus/err.log &
+}
--- /dev/null
+#!/bin/bash
+
+# Convert an Azureus console log into "epoch<TAB>field" lines; assumes
+# one 'ETA' status line per second — TODO confirm status interval.
+if test $# -ne 2; then
+	echo "Usage: $0 azr_log_file start_time[epoch format]"
+	exit 1
+fi
+
+LOG_FILE=$1
+START_DATE=$2
+
+# FS splits on ']', '[', ':', ',', space and tab; $4 is the progress field
+grep 'ETA' ${LOG_FILE} | awk -v start_date=${START_DATE} '
+BEGIN {
+	FS = "[] :,\t[]+"
+	secs = start_date
+}
+
+{
+	secs++
+	print secs "\t" $4
+}'
--- /dev/null
+#!/bin/bash
+
+# Drive one Azureus download session: start the client, poll its log
+# for the completion marker, record the elapsed time and (optionally)
+# parse/upload progress for monitoring.
+#
+# Arguments: global_config_file metafile kill_after_dld use_monitoring
+if test $# -ne 4; then
+	echo "Usage: $0 global_config_file metafile kill_after_dld use_monitoring"
+	exit 1
+fi
+
+METAFILE=$2
+KILL_AFTER_DLD=$3
+USE_MONITORING=$4
+
+# load global config file (RUN/LOG/DLD/RES dirs, upload settings)
+source $1
+
+# load local config file (CLIENT_ID, DLD_END_STRING, run_client)
+source ./config.bash
+
+LOCAL_RUN_DIR=${RUN_DIR}/${CLIENT_ID}
+LOCAL_LOG_DIR=${LOG_DIR}/${CLIENT_ID}
+LOCAL_DLD_DIR=${DLD_DIR}/${CLIENT_ID}
+LOCAL_RES_DIR=${RES_DIR}/${CLIENT_ID}
+
+START_TIME=$(date +%F.%T)
+START_TIME_EPOCH=$(date +%s)
+
+SESSION_NAME="${CLIENT_ID}_$(basename ${METAFILE})_${START_TIME}"
+LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}.log
+VERBOSE_LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}_verbose.log
+TIME_FILE="${LOCAL_LOG_DIR}/${SESSION_NAME}.time"
+RES_FILE="${LOCAL_RES_DIR}/${SESSION_NAME}.rate"
+
+UPLOAD_SCRIPT=${LOCAL_RUN_DIR}/upload.bash
+PARSE_SCRIPT=${LOCAL_RUN_DIR}/parse.bash
+MON_SCRIPT=${LOCAL_RUN_DIR}/start_mon.bash
+
+# parse the log into a rate file and upload it (upload runs detached)
+function do_parse_upload()
+{
+	bash ${PARSE_SCRIPT} ${LOG_FILE} ${START_TIME_EPOCH} > ${RES_FILE}
+	bash ${UPLOAD_SCRIPT} ${RES_FILE} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} &
+}
+
+# call run_client function; CLIENT_PID stores client process' pid
+run_client ${METAFILE} ${LOCAL_DLD_DIR} ${LOG_FILE} ${VERBOSE_LOG_FILE}
+CLIENT_PID=$!
+
+secs=0
+while true; do
+	tail "${LOG_FILE}" | grep "${DLD_END_STRING}" &> /dev/null
+	if test $? -eq 0; then
+		echo "${CLIENT_ID} time: $(ps -p ${CLIENT_PID} -o etime --no-headers)" > ${TIME_FILE}
+		if test "${KILL_AFTER_DLD}" == "yes"; then
+			kill -9 ${CLIENT_PID}
+		fi
+		break
+	fi
+	sleep 1
+
+	if test "${USE_MONITORING}" == "yes"; then
+		# Bug fix: '(($secs++))' expanded to '((0++))', an arithmetic
+		# error, so secs never advanced and do_parse_upload ran every
+		# second instead of once a minute.
+		secs=$((secs + 1))
+		# do a parse on the log file each minute and upload it
+		if test $((secs % 60)) -eq 0; then
+			do_parse_upload
+		fi
+
+		# restart monitoring every 4 minutes
+		if test $secs -eq 240; then
+			bash ${MON_SCRIPT} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} $(basename ${RES_FILE})
+		fi
+	fi
+done
+
+# upload the final parse once the download has finished
+if test "${USE_MONITORING}" == "yes"; then
+	do_parse_upload
+fi
+
+# do an analysis on output file
+# TODO
+#!/bin/bash
+
+# Start the remote ApMon monitoring helper for Azureus over ssh.
+# nohup + background keeps it alive after the ssh session ends.
+if test $# -ne 4; then
+	echo "Usage: $0 ip_address username remote_dir mon_file"
+	exit 1
+fi
+
+IP_ADDRESS=$1
+USR=$2
+RDIR=$3
+MON_FILE=$4
+
+ssh ${USR}@${IP_ADDRESS} "nohup python ~/apmon_use/mon-apmon.py ${RDIR}/${MON_FILE} azureus &> out_azr.txt &"
--- /dev/null
+#!/bin/bash
+
+# Copy a result file to a remote host via scp.
+if test $# -ne 4; then
+	echo "Usage: $0 file ip_address username remote_dir"
+	exit 1
+fi
+
+FILE=$1
+IP_ADDRESS=$2
+USR=$3
+RDIR=$4
+
+# quote expansions so paths with spaces survive word splitting
+scp "${FILE}" "${USR}@${IP_ADDRESS}:${RDIR}"
--- /dev/null
+#!/bin/bash
+
+# Per-client settings for the BitTornado headless client.
+CLIENT_ID=btt
+COMMAND=~/cs-tribler/perf/bt-clients/BitTornado-CVS/btdownloadheadless.py
--- /dev/null
+#!/bin/bash
+
+# Run BitTornado on a metafile, wait for 'Download Succeeded' in its log
+# and record the elapsed time; optionally kill the client afterwards.
+
+# argument should be metafile (.torrent)
+if test $# -ne 2; then
+	echo "Usage: $0 metafile kill|no_kill"
+	exit 1
+fi
+
+LOCAL_BASE=~/cs-tribler/perf/bt-prof/clients/bittornado/
+source ${LOCAL_BASE}/config.bash
+
+METAFILE=$1
+KILL_AFTER_DLD=$2
+
+START_TIME=$(date +%F.%T)
+BASE_LOG_NAME="${CLIENT_ID}_$(basename ${METAFILE})_${START_TIME}"
+
+# download into dld/, capturing all client output in log/
+cd ${LOCAL_BASE}/dld/ && ${COMMAND} --responsefile ${METAFILE} &> ${LOCAL_BASE}/log/"${BASE_LOG_NAME}".log &
+CLIENT_PID=$!
+
+# poll the log once a second for the completion marker
+while true; do
+	tail "${LOCAL_BASE}/log/${BASE_LOG_NAME}.log" | grep 'Download Succeeded' &> /dev/null
+	if test $? -eq 0; then
+		echo "bittornado time: $(ps -p $CLIENT_PID -o etime --no-headers)" > ${LOCAL_BASE}/log/"${BASE_LOG_NAME}.time"
+		if test "${KILL_AFTER_DLD}" == "kill"; then
+			kill -9 ${CLIENT_PID}
+		fi
+		break
+	fi
+	sleep 1
+done
+
+# do an analysis on output file
+# TODO
--- /dev/null
+#!/bin/bash
+
+# Per-client settings for the hrktorrent (libtorrent-based) client.
+CLIENT_ID=hrktorrent
+# line that marks download completion in the client output
+DLD_END_STRING="Torrent finished"
+# seconds between monitor updates (used by monitor.bash)
+MON_UPDATE_INTERVAL=5
+
+COMMAND=/home/p2p/p2p-clients/hrktorrent/hrktorrent
+
+# Start hrktorrent in the background from inside the download directory.
+# $1 = metafile, $2 = download dir, $3 = log, $4 = verbose log (unused),
+# $5 = port, $6 = max upload speed, $7 = max download speed
+function run_client()
+{
+	_METAFILE=$1
+	_LOCAL_DLD_DIR=$2
+	_LOG_FILE=$3
+	_VERBOSE_FILE=$4
+	_PORT=$5
+	_UPLOAD_SPEED=$6
+	_DOWNLOAD_SPEED=$7
+
+	echo "metafile = $_METAFILE; dld_dir = $_LOCAL_DLD_DIR; log = $_LOG_FILE; _verbose = $_VERBOSE_FILE; port = $_PORT; us = $_UPLOAD_SPEED; ds = $_DOWNLOAD_SPEED"
+	pushd . &> /dev/null
+	cd ${_LOCAL_DLD_DIR}
+	# NOTE(review): options are concatenated with their values
+	# ("--minport6881" style) — presumably how this hrktorrent build
+	# parses them; confirm against the client before changing.
+	$COMMAND --minport${_PORT} --maxport${_PORT} --maxup${_UPLOAD_SPEED} --maxdown${_DOWNLOAD_SPEED} ${_METAFILE} &> ${_LOG_FILE} &
+	popd
+}
+
+# Print the latest download speed from the log (kb/s suffix stripped).
+function parse_log()
+{
+	_LOG_FILE=$1
+
+	tail -1 $_LOG_FILE | awk -F '[ :,]+' '{print $7;}' | sed 's/kb\/s//g'
+}
--- /dev/null
+#!/bin/bash
+
+if test $# -ne 4; then
+ echo "Usage: $0 "
+ exit 1
+fi
+
+source ./config.bash
+
+_MON_SCRIPT=$1
+_LOG_FILE=$2
+_VERBOSE_FILE=$3
+_START_TIME=$4
+
+_MON_BASE_DIR=$(dirname $_MON_SCRIPT)
+
+while true; do
+ sleep $MON_UPDATE_INTERVAL
+ ret=$(parse_log $_LOG_FILE)
+ if test $? -ne 0; then
+ echo "Ended"
+ exit 0
+ fi
+
+ pushd . &> /dev/null
+ cd $_MON_BASE_DIR
+ python $_MON_SCRIPT $(hostname) "$CLIENT_ID-${_START_TIME}-ds" $ret
+ popd &> /dev/null
+done
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+# Convert an hrktorrent log into "epoch<TAB>speed" lines; assumes one
+# '^ps' status line per second, with the speed in field $19 —
+# TODO confirm against the client's status-line format.
+if test $# -ne 2; then
+	echo "Usage: $0 hrk_log_file start_time[epoch format]"
+	exit 1
+fi
+
+LOG_FILE=$1
+START_DATE=$2
+
+grep '^ps' ${LOG_FILE} | awk -v start_date=${START_DATE} '
+BEGIN {
+	FS = "[ |:|,]+"
+	secs = start_date
+}
+
+{
+	secs++
+	print secs "\t" $19
+}'
--- /dev/null
+#!/bin/bash
+
+# argument should be metafile (.torrent)
+if test $# -ne 7; then
+ echo "Usage: $0 global_config_file metafile kill_afer_dld use_monitoring port upload_speed download_speed"
+ exit 1
+fi
+
+METAFILE=$2
+KILL_AFTER_DLD=$3
+USE_MONITORING=$4
+PORT=$5
+UPLOAD_SPEED=$6
+DOWNLOAD_SPEED=$7
+
+
+# load global config file
+source $1
+
+# load local config file
+source ./config.bash
+
+LOCAL_RUN_DIR=${RUN_DIR}/${CLIENT_ID}
+LOCAL_LOG_DIR=${LOG_DIR}/${CLIENT_ID}
+LOCAL_DLD_DIR=${DLD_DIR}/${CLIENT_ID}
+LOCAL_RES_DIR=${RES_DIR}/${CLIENT_ID}
+
+START_TIME=$(date +%F.%T)
+START_TIME_EPOCH=$(date +%s)
+
+SESSION_NAME="${CLIENT_ID}_$(basename ${METAFILE})_${START_TIME}"
+LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}.log
+VERBOSE_LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}_verbose.log
+TIME_FILE="${LOCAL_LOG_DIR}/${SESSION_NAME}.time"
+RES_FILE="${LOCAL_RES_DIR}/${SESSION_NAME}.rate"
+
+#UPLOAD_SCRIPT=${LOCAL_RUN_DIR}/upload.bash
+#PARSE_SCRIPT=${LOCAL_RUN_DIR}/parse.bash
+#MON_SCRIPT=${LOCAL_RUN_DIR}/start_mon.bash
+
+# parse and upload result file
+function do_parse_upload()
+{
+ bash ${PARSE_SCRIPT} ${LOG_FILE} ${START_TIME_EPOCH} > ${RES_FILE}
+ bash ${UPLOAD_SCRIPT} ${RES_FILE} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} &
+}
+
+# call run_client function; CLIENT_PID stores client processes' pid
+run_client ${METAFILE} ${LOCAL_DLD_DIR} ${LOG_FILE} ${VERBOSE_LOG_FILE} \
+ ${PORT} ${UPLOAD_SPEED} ${DOWNLOAD_SPEED}
+CLIENT_PID=$!
+
+if test "${USE_MONITORING}" == "yes"; then
+ bash monitor.bash ${MON_SCRIPT} ${LOG_FILE} ${VERBOSE_LOG_FILE} "test" &> ${LOCAL_LOG_DIR}/mon_out_err.txt
+ MONITOR_PID=$!
+fi
+
+secs=0
+while true; do
+ tail "${LOG_FILE}" | grep "${DLD_END_STRING}" &> /dev/null
+ if test $? -eq 0; then
+ echo "${CLIENT_ID} time: $(ps -p ${CLIENT_PID} -o etime --no-headers)" > ${TIME_FILE}
+ if test "${KILL_AFTER_DLD}" == "yes"; then
+ kill -9 ${CLIENT_PID}
+ kill -9 ${MONITOR_PID} &> /dev/null
+ fi
+ break
+ fi
+ sleep 1
+
+# if test "${USE_MONITORING}" == "yes"; then
+# # do a parse on the log file each minute and upload it
+# (($secs++))
+# mod=$(($secs % 60))
+# if test $mod -eq 0; then
+# do_parse_upload
+# fi
+#
+# # restart monitoring every 4 minutes
+# if test $secs -eq 240; then
+# bash ${MON_SCRIPT} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} $(basename ${RES_FILE})
+# fi
+# fi
+done
+
+# one last time :-)
+#if test "${USE_MONITORING}" == "yes"; then
+# do_parse_upload
+#fi
+
+# do an analysis on output file
+# TODO
--- /dev/null
+# Quick manual test: launch a detached hrktorrent seeder on simple.torrent.
+bash -c "~/cs-tribler/perf/bt-clients/hrktorrent-0.3.2-mod/hrktorrent ~/simple.torrent > out.txt < /dev/null" &
--- /dev/null
+#!/bin/bash
+
+# Start the remote ApMon monitoring helper for hrktorrent over ssh.
+# nohup + background keeps it alive after the ssh session ends.
+if test $# -ne 4; then
+	echo "Usage: $0 ip_address username remote_dir mon_file"
+	exit 1
+fi
+
+IP_ADDRESS=$1
+USR=$2
+RDIR=$3
+MON_FILE=$4
+
+ssh ${USR}@${IP_ADDRESS} "nohup python ~/apmon_use/mon-apmon.py ${RDIR}/${MON_FILE} hrk &> out_hrk.txt &"
--- /dev/null
+#!/bin/bash
+
+# Copy a result file to a remote host via scp.
+if test $# -ne 4; then
+	echo "Usage: $0 file ip_address username remote_dir"
+	exit 1
+fi
+
+FILE=$1
+IP_ADDRESS=$2
+USR=$3
+RDIR=$4
+
+# quote expansions so paths with spaces survive word splitting
+scp "${FILE}" "${USR}@${IP_ADDRESS}:${RDIR}"
--- /dev/null
+#!/bin/bash
+
+# Per-client settings for the BitTorrent mainline console client.
+CLIENT_ID=mainline
+# line that marks download completion in the console output
+DLD_END_STRING="seeding"
+
+COMMAND=/home/p2p/p2p-clients/BitTorrent-5.2.2/bittorrent-console.py
+
+# Start the mainline client in the background.
+# $1 = metafile, $2 = download dir, $3 = log, $4 = verbose log (unused)
+function run_client()
+{
+	_METAFILE=$1
+	_LOCAL_DLD_DIR=$2
+	_LOG_FILE=$3
+	_VERBOSE_FILE=$4
+
+	$COMMAND --save_in ${_LOCAL_DLD_DIR} --display_interval 1 ${_METAFILE} &> ${_LOG_FILE} &
+}
--- /dev/null
+#!/bin/bash
+
+# Convert a mainline client log into "epoch<TAB>percent%" lines,
+# stopping the output once 100% has been reported.
+if test $# -ne 2; then
+	echo "Usage: $0 mnl_log_file start_time[epoch format]"
+	exit 1
+fi
+
+LOG_FILE=$1
+START_DATE=$2
+
+grep '^percent' ${LOG_FILE} | awk -v start_date=${START_DATE} '
+BEGIN {
+	FS = "[ |:|,\t]+"
+	secs = start_date
+	finished = 0
+}
+
+{
+	secs++
+	# one log line per second; suppress everything after 100%
+	if (finished == 0) {
+		print secs "\t" $3 "%"
+		if ($3 ~ "100") {
+			finished = 1
+		}
+	}
+}'
--- /dev/null
+#!/bin/bash
+
+# Drive one mainline download session: start the client, poll its log
+# for the completion marker, record the elapsed time and (optionally)
+# parse/upload progress for monitoring.
+#
+# Arguments: global_config_file metafile kill_after_dld use_monitoring
+if test $# -ne 4; then
+	echo "Usage: $0 global_config_file metafile kill_after_dld use_monitoring"
+	exit 1
+fi
+
+METAFILE=$2
+KILL_AFTER_DLD=$3
+USE_MONITORING=$4
+
+# load global config file (RUN/LOG/DLD/RES dirs, upload settings)
+source $1
+
+# load local config file (CLIENT_ID, DLD_END_STRING, run_client)
+source ./config.bash
+
+LOCAL_RUN_DIR=${RUN_DIR}/${CLIENT_ID}
+LOCAL_LOG_DIR=${LOG_DIR}/${CLIENT_ID}
+LOCAL_DLD_DIR=${DLD_DIR}/${CLIENT_ID}
+LOCAL_RES_DIR=${RES_DIR}/${CLIENT_ID}
+
+START_TIME=$(date +%F.%T)
+START_TIME_EPOCH=$(date +%s)
+
+SESSION_NAME="${CLIENT_ID}_$(basename ${METAFILE})_${START_TIME}"
+LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}.log
+VERBOSE_LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}_verbose.log
+TIME_FILE="${LOCAL_LOG_DIR}/${SESSION_NAME}.time"
+RES_FILE="${LOCAL_RES_DIR}/${SESSION_NAME}.rate"
+
+UPLOAD_SCRIPT=${LOCAL_RUN_DIR}/upload.bash
+PARSE_SCRIPT=${LOCAL_RUN_DIR}/parse.bash
+MON_SCRIPT=${LOCAL_RUN_DIR}/start_mon.bash
+
+# parse the log into a rate file and upload it (upload runs detached)
+function do_parse_upload()
+{
+	bash ${PARSE_SCRIPT} ${LOG_FILE} ${START_TIME_EPOCH} > ${RES_FILE}
+	bash ${UPLOAD_SCRIPT} ${RES_FILE} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} &
+}
+
+# call run_client function; CLIENT_PID stores client process' pid
+run_client ${METAFILE} ${LOCAL_DLD_DIR} ${LOG_FILE} ${VERBOSE_LOG_FILE}
+CLIENT_PID=$!
+
+secs=0
+while true; do
+	tail "${LOG_FILE}" | grep "${DLD_END_STRING}" &> /dev/null
+	if test $? -eq 0; then
+		echo "${CLIENT_ID} time: $(ps -p ${CLIENT_PID} -o etime --no-headers)" > ${TIME_FILE}
+		if test "${KILL_AFTER_DLD}" == "yes"; then
+			kill -9 ${CLIENT_PID}
+		fi
+		break
+	fi
+	sleep 1
+
+	if test "${USE_MONITORING}" == "yes"; then
+		# Bug fix: '(($secs++))' expanded to '((0++))', an arithmetic
+		# error, so secs never advanced and do_parse_upload ran every
+		# second instead of once a minute.
+		secs=$((secs + 1))
+		# do a parse on the log file each minute and upload it
+		if test $((secs % 60)) -eq 0; then
+			do_parse_upload
+		fi
+
+		# restart monitoring every 4 minutes
+		if test $secs -eq 240; then
+			bash ${MON_SCRIPT} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} $(basename ${RES_FILE})
+		fi
+	fi
+done
+
+# upload the final parse once the download has finished
+if test "${USE_MONITORING}" == "yes"; then
+	do_parse_upload
+fi
+
+# do an analysis on output file
+# TODO
+#!/bin/bash
+
+# Start the remote ApMon monitoring helper for mainline over ssh.
+# nohup + background keeps it alive after the ssh session ends.
+if test $# -ne 4; then
+	echo "Usage: $0 ip_address username remote_dir mon_file"
+	exit 1
+fi
+
+IP_ADDRESS=$1
+USR=$2
+RDIR=$3
+MON_FILE=$4
+
+ssh ${USR}@${IP_ADDRESS} "nohup python ~/apmon_use/mon-apmon.py ${RDIR}/${MON_FILE} mainline &> out_mnl.txt &"
--- /dev/null
+#!/bin/bash
+
+# Copy a result file to a remote host via scp.
+if test $# -ne 4; then
+	echo "Usage: $0 file ip_address username remote_dir"
+	exit 1
+fi
+
+FILE=$1
+IP_ADDRESS=$2
+USR=$3
+RDIR=$4
+
+# quote expansions so paths with spaces survive word splitting
+scp "${FILE}" "${USR}@${IP_ADDRESS}:${RDIR}"
--- /dev/null
+#!/bin/bash
+
+# Per-client settings for the Transmission CLI client.
+CLIENT_ID=transmission
+# line that marks download completion in the console output
+DLD_END_STRING="Seeding"
+
+COMMAND=/home/p2p/p2p-clients/transmission-1.40/cli/transmissioncli
+
+# Start transmissioncli in the background.
+# $1 = metafile, $2 = download dir, $3 = log, $4 = verbose log (unused)
+# Flags: -v 1 (verbosity), -u -1 (presumably unlimited upload — confirm
+# against transmissioncli's option docs), -w (download directory).
+function run_client()
+{
+	_METAFILE=$1
+	_LOCAL_DLD_DIR=$2
+	_LOG_FILE=$3
+	_VERBOSE_FILE=$4
+
+	${COMMAND} -v 1 -u -1 -w ${_LOCAL_DLD_DIR} ${_METAFILE} &> ${_LOG_FILE} &
+}
--- /dev/null
+#!/bin/bash
+
+# Convert a transmissioncli log into "epoch<TAB>progress" lines; the
+# CLI rewrites its status line with carriage returns, hence the sed
+# split into separate lines before parsing.
+if test $# -ne 2; then
+	echo "Usage: $0 tms_log_file start_time[epoch format]"
+	exit 1
+fi
+
+LOG_FILE=$1
+START_DATE=$2
+
+grep 'Progress' ${LOG_FILE} | sed 's/\r/\n/g' | awk -v start_date=${START_DATE} '
+BEGIN {
+	FS = "[ |:|,]+"
+	secs = start_date
+}
+
+{
+	secs++
+	print secs "\t" $2
+}' | grep '%'
--- /dev/null
+#!/bin/bash
+
+# argument should be metafile (.torrent)
+if test $# -ne 4; then
+ echo "Usage: $0 global_config_file metafile kill_afer_dld use_monitoring"
+ exit 1
+fi
+
+METAFILE=$2
+KILL_AFTER_DLD=$3
+USE_MONITORING=$4
+
+# load global config file
+source $1
+
+# load local config file
+source ./config.bash
+
+LOCAL_RUN_DIR=${RUN_DIR}/${CLIENT_ID}
+LOCAL_LOG_DIR=${LOG_DIR}/${CLIENT_ID}
+LOCAL_DLD_DIR=${DLD_DIR}/${CLIENT_ID}
+LOCAL_RES_DIR=${RES_DIR}/${CLIENT_ID}
+
+START_TIME=$(date +%F.%T)
+START_TIME_EPOCH=$(date +%s)
+
+SESSION_NAME="${CLIENT_ID}_$(basename ${METAFILE})_${START_TIME}"
+LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}.log
+VERBOSE_LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}_verbose.log
+TIME_FILE="${LOCAL_LOG_DIR}/${SESSION_NAME}.time"
+RES_FILE="${LOCAL_RES_DIR}/${SESSION_NAME}.rate"
+
+UPLOAD_SCRIPT=${LOCAL_RUN_DIR}/upload.bash
+PARSE_SCRIPT=${LOCAL_RUN_DIR}/parse.bash
+MON_SCRIPT=${LOCAL_RUN_DIR}/start_mon.bash
+
+# parse and upload result file
+function do_parse_upload()
+{
+ bash ${PARSE_SCRIPT} ${LOG_FILE} ${START_TIME_EPOCH} > ${RES_FILE}
+ bash ${UPLOAD_SCRIPT} ${RES_FILE} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} &
+}
+
+# call run_client function; CLIENT_PID stores client processes' pid
+run_client ${METAFILE} ${LOCAL_DLD_DIR} ${LOG_FILE} ${VERBOSE_LOG_FILE}
+CLIENT_PID=$!
+
+secs=0
+while true; do
+ tail "${LOG_FILE}" | grep "${DLD_END_STRING}" &> /dev/null
+ if test $? -eq 0; then
+ echo "${CLIENT_ID} time: $(ps -p ${CLIENT_PID} -o etime --no-headers)" > ${TIME_FILE}
+ if test "${KILL_AFTER_DLD}" == "yes"; then
+ kill -9 ${CLIENT_PID}
+ fi
+ break
+ fi
+ sleep 1
+
+ if test "${USE_MONITORING}" == "yes"; then
+ # do a parse on the log file each minute and upload it
+ (($secs++))
+ mod=$(($secs % 60))
+ if test $mod -eq 0; then
+ do_parse_upload
+ fi
+
+ # restart monitoring every 4 minutes
+ if test $secs -eq 240; then
+ bash ${MON_SCRIPT} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} $(basename ${RES_FILE})
+ fi
+ fi
+done
+
+# one last time :-)
+if test "${USE_MONITORING}" == "yes"; then
+ do_parse_upload
+fi
+
+# do an analysis on output file
+# TODO
--- /dev/null
+#!/bin/bash
+
+# Start the remote ApMon monitoring helper for Transmission over ssh.
+# nohup + background keeps it alive after the ssh session ends.
+if test $# -ne 4; then
+	echo "Usage: $0 ip_address username remote_dir mon_file"
+	exit 1
+fi
+
+IP_ADDRESS=$1
+USR=$2
+RDIR=$3
+MON_FILE=$4
+
+ssh ${USR}@${IP_ADDRESS} "nohup python ~/apmon_use/mon-apmon.py ${RDIR}/${MON_FILE} transmission &> out_tms.txt &"
--- /dev/null
+#!/bin/bash
+
+# Copy a result file to a remote host via scp.
+if test $# -ne 4; then
+	echo "Usage: $0 file ip_address username remote_dir"
+	exit 1
+fi
+
+FILE=$1
+IP_ADDRESS=$2
+USR=$3
+RDIR=$4
+
+# quote expansions so paths with spaces survive word splitting
+scp "${FILE}" "${USR}@${IP_ADDRESS}:${RDIR}"
--- /dev/null
+#!/bin/bash
+
+# Per-client settings for the Tribler command-line client.
+CLIENT_ID=tribler
+# status string that marks download completion in the client output
+DLD_END_STRING="DLSTATUS_SEEDING"
+
+ROOTDIR=~/p2p-clients/Tribler_4.5.1_src
+COMMAND=~/p2p-clients/Tribler_4.5.1_src/Tribler/Tools/cmdlinedl-razvand.py
+
+# Start Tribler in the background on the given listen port.
+# $1 = metafile, $2 = download dir, $3 = log, $4 = verbose log (unused),
+# $5 = listen port
+function run_client()
+{
+	_METAFILE=$1
+	_LOCAL_DLD_DIR=$2
+	_LOG_FILE=$3
+	_VERBOSE_FILE=$4
+	_PORT=$5
+
+	# Tribler's modules are resolved relative to the source tree
+	export PYTHONPATH=${ROOTDIR}
+	python ${COMMAND} -o ${_LOCAL_DLD_DIR} -p ${_PORT} ${_METAFILE} &> ${_LOG_FILE} &
+}
--- /dev/null
+#!/bin/bash
+
+# Convert a Tribler log into "epoch<TAB>percent%" lines; the sed strips
+# everything before the DLSTATUS_* token so $2 is the progress fraction.
+if test $# -ne 2; then
+	echo "Usage: $0 trb_log_file start_time[epoch format]"
+	exit 1
+fi
+
+LOG_FILE=$1
+START_DATE=$2
+
+# Bug fix: parse the log file given as argument instead of the
+# hard-coded debugging path (clients/tribler/my.log) left behind.
+grep 'DLSTATUS' ${LOG_FILE} | sed 's/\(.*\)DLSTATUS/DLSTATUS/g' | awk -v start_date=${START_DATE} '
+BEGIN {
+	FS = "[ ]+"
+	secs = start_date
+}
+
+{
+	secs++
+	perc = $2 * 100
+	print secs "\t" perc "%"
+}'
--- /dev/null
+#!/bin/bash
+
+# Drive one Tribler download session: start the client on the given
+# listen port, poll its log for the completion marker, record the
+# elapsed time and (optionally) parse/upload progress for monitoring.
+#
+# Bug fix: only five arguments are consumed, but the old check demanded
+# seven (copy-paste from the hrktorrent runner) and the usage text
+# listed 'port' and 'use_monitoring' in the wrong order.
+# Arguments: global_config_file metafile kill_after_dld use_monitoring port
+if test $# -ne 5; then
+	echo "Usage: $0 global_config_file metafile kill_after_dld use_monitoring port"
+	exit 1
+fi
+
+METAFILE=$2
+KILL_AFTER_DLD=$3
+USE_MONITORING=$4
+LISTENING_PORT=$5
+
+# load global config file (RUN/LOG/DLD/RES dirs, upload settings)
+source $1
+
+# load local config file (CLIENT_ID, DLD_END_STRING, run_client)
+source ./config.bash
+
+LOCAL_RUN_DIR=${RUN_DIR}/${CLIENT_ID}
+LOCAL_LOG_DIR=${LOG_DIR}/${CLIENT_ID}
+LOCAL_DLD_DIR=${DLD_DIR}/${CLIENT_ID}
+LOCAL_RES_DIR=${RES_DIR}/${CLIENT_ID}
+
+START_TIME=$(date +%F.%T)
+START_TIME_EPOCH=$(date +%s)
+
+SESSION_NAME="${CLIENT_ID}_$(basename ${METAFILE})_${START_TIME}"
+LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}.log
+VERBOSE_LOG_FILE=${LOCAL_LOG_DIR}/${SESSION_NAME}_verbose.log
+TIME_FILE="${LOCAL_LOG_DIR}/${SESSION_NAME}.time"
+RES_FILE="${LOCAL_RES_DIR}/${SESSION_NAME}.rate"
+
+UPLOAD_SCRIPT=${LOCAL_RUN_DIR}/upload.bash
+PARSE_SCRIPT=${LOCAL_RUN_DIR}/parse.bash
+MON_SCRIPT=${LOCAL_RUN_DIR}/start_mon.bash
+
+# parse the log into a rate file and upload it (upload runs detached)
+function do_parse_upload()
+{
+	bash ${PARSE_SCRIPT} ${LOG_FILE} ${START_TIME_EPOCH} > ${RES_FILE}
+	bash ${UPLOAD_SCRIPT} ${RES_FILE} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} &
+}
+
+# call run_client function; CLIENT_PID stores client process' pid
+run_client ${METAFILE} ${LOCAL_DLD_DIR} ${LOG_FILE} ${VERBOSE_LOG_FILE} ${LISTENING_PORT}
+CLIENT_PID=$!
+
+secs=0
+while true; do
+	tail "${LOG_FILE}" | grep "${DLD_END_STRING}" &> /dev/null
+	if test $? -eq 0; then
+		echo "${CLIENT_ID} time: $(ps -p ${CLIENT_PID} -o etime --no-headers)" > ${TIME_FILE}
+		if test "${KILL_AFTER_DLD}" == "yes"; then
+			kill -9 ${CLIENT_PID}
+		fi
+		break
+	fi
+	sleep 1
+
+	if test "${USE_MONITORING}" == "yes"; then
+		# Bug fix: '(($secs++))' expanded to '((0++))', an arithmetic
+		# error, so secs never advanced and do_parse_upload ran every
+		# second instead of once a minute.
+		secs=$((secs + 1))
+		# do a parse on the log file each minute and upload it
+		if test $((secs % 60)) -eq 0; then
+			do_parse_upload
+		fi
+
+		# restart monitoring every 4 minutes
+		if test $secs -eq 240; then
+			bash ${MON_SCRIPT} ${UPLOAD_IP_ADDRESS} ${UPLOAD_USER_NAME} ${UPLOAD_DIR} $(basename ${RES_FILE})
+		fi
+	fi
+done
+
+# upload the final parse once the download has finished
+if test "${USE_MONITORING}" == "yes"; then
+	do_parse_upload
+fi
+
+# do an analysis on output file
+# TODO
+#!/bin/bash
+
+# Start the remote ApMon monitoring helper for Tribler over ssh.
+# nohup + background keeps it alive after the ssh session ends.
+if test $# -ne 4; then
+	echo "Usage: $0 ip_address username remote_dir mon_file"
+	exit 1
+fi
+
+IP_ADDRESS=$1
+USR=$2
+RDIR=$3
+MON_FILE=$4
+
+ssh ${USR}@${IP_ADDRESS} "nohup python ~/apmon_use/mon-apmon.py ${RDIR}/${MON_FILE} Tribler &> out_trb.txt &"
--- /dev/null
+#!/bin/bash
+
+# Copy a result file to a remote host via scp.
+if test $# -ne 4; then
+	echo "Usage: $0 file ip_address username remote_dir"
+	exit 1
+fi
+
+FILE=$1
+IP_ADDRESS=$2
+USR=$3
+RDIR=$4
+
+# quote expansions so paths with spaces survive word splitting
+scp "${FILE}" "${USR}@${IP_ADDRESS}:${RDIR}"
--- /dev/null
+#!/bin/bash
+
+# Global paths and settings shared by all per-client run scripts.
+BASE_DIR=/home/p2p/p2p-svn-repo/trunk/bt-prof/
+RUN_DIR=/home/p2p/p2p-svn-repo/trunk/bt-prof/clients/
+LOG_DIR=/home/p2p/p2p-log/
+DLD_DIR=/home/p2p/p2p-dld/
+RES_DIR=/home/p2p/p2p-res/
+MON_SCRIPT=/home/p2p/p2p-svn-repo/trunk/bt-prof/mon/p2p-mon.py
+
+declare -a CLIENT_ARRAY
+CLIENT_ARRAY=("azureus" "hrktorrent" "tribler" "transmission" "mainline" "aria")
+# Derive the size instead of hard-coding 6 (kept for existing consumers);
+# the old constant silently went stale when clients were added/removed.
+CLIENT_ARRAY_SIZE=${#CLIENT_ARRAY[@]}
+
+# make sure each client's log/download/result directories exist
+for client in "${CLIENT_ARRAY[@]}"; do
+	mkdir -p $LOG_DIR/$client &> /dev/null
+	mkdir -p $DLD_DIR/$client &> /dev/null
+	mkdir -p $RES_DIR/$client &> /dev/null
+done
+
+UPLOAD_IP_ADDRESS="141.85.37.25"
+UPLOAD_USER_NAME="p2p"
+UPLOAD_DIR="logs/"
--- /dev/null
+#!/bin/bash
+
+# Print, for each client, the torrent it is currently running (only
+# hrktorrent is actually implemented; the rest are placeholders).
+
+print_hrktorrent()
+{
+	# 'hrktorr\ent' equals 'hrktorrent' to grep, but keeps this
+	# pipeline's own grep command line from matching itself
+	__file=$(ps -e -o cmd | grep 'hrktorr\ent' | head -n 1 | cut -d ' ' -f 6)
+	if test " $__file" != " "; then
+		echo "hrktorrent: $__file"
+	fi
+}
+
+# placeholder, not implemented
+print_azureus()
+{
+	a="1"
+}
+
+# placeholder, not implemented
+print_tribler()
+{
+	a="1"
+}
+
+# placeholder, not implemented
+print_mainline()
+{
+	a="1"
+}
+
+# placeholder, not implemented
+print_aria()
+{
+	a="1"
+}
+
+# placeholder, not implemented
+print_transmission()
+{
+	a="1"
+}
+
+print_hrktorrent
+print_azureus
+print_tribler
+print_mainline
+print_aria
+print_transmission
+
+exit 0
--- /dev/null
+
+"""
+ * ApMon - Application Monitoring Tool
+ * Version: 2.2.13
+ *
+ * Copyright (C) 2006 California Institute of Technology
+ *
+ * Permission is hereby granted, free of charge, to use, copy and modify
+ * this software and its documentation (the "Software") for any
+ * purpose, provided that existing copyright notices are retained in
+ * all copies and that this notice is included verbatim in any distributions
+ * or substantial portions of the Software.
+ * This software is a part of the MonALISA framework (http://monalisa.cacr.caltech.edu).
+ * Users of the Software are asked to feed back problems, benefits,
+ * and/or suggestions about the software to the MonALISA Development Team
+ * (developers@monalisa.cern.ch). Support for this software - fixing of bugs,
+ * incorporation of new features - is done on a best effort basis. All bug
+ * fixes and enhancements will be made available under the same terms and
+ * conditions as the original software,
+
+ * IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY DERIVATIVES THEREOF,
+ * EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ * THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE IS
+ * PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+ * MODIFICATIONS.
+"""
+
+import time
+import threading
+import sys
+import traceback
+
+# Simple logging class
+# NOTE(review): vendored ApMon 2.2.13 code (Python 2 — print statement,
+# 'this' instead of 'self'); keep modifications minimal.
+class Logger:
+
+	FATAL = 0	# When something very bad happened and we should quit
+	ERROR = 1	# Typically when something important fails
+	WARNING = 2	# Intermediate logging level.
+	INFO = 3	# Intermediate logging level.
+	NOTICE = 4	# Logging level with detailed information.
+	DEBUG = 5	# Logging level for debugging
+
+	LEVELS = ['FATAL', 'ERROR', 'WARNING', 'INFO', 'NOTICE', 'DEBUG']
+
+	# Constructor
+	def __init__ (this, defaultLevel = INFO):
+		this.log_lock = threading.Lock();
+		this.logLevel = defaultLevel
+
+	# Print the given message if the level is more serious as the existing one.
+	# The lock serializes output from concurrent threads; it is held
+	# while printing, so slow stdout slows all logging threads.
+	def log(this, level, message, printex=False):
+		this.log_lock.acquire();
+		if(level <= this.logLevel):
+			print time.asctime() + ": ApMon["+Logger.LEVELS[level]+"]: "+message;
+			if printex:
+				traceback.print_exc()
+		this.log_lock.release();
+
+	# Set the logging level by name (one of LEVELS); unknown names
+	# leave the current level unchanged.
+	def setLogLevel(this, strLevel):
+		this.log_lock.acquire();
+		for l_idx in range(len(Logger.LEVELS)):
+			if strLevel == Logger.LEVELS[l_idx]:
+				this.logLevel = l_idx;
+		this.log_lock.release();
+
+anaconda.cs.pub.ro:8884
--- /dev/null
+
+"""
+ * ApMon - Application Monitoring Tool
+ * Version: 2.2.13
+ *
+ * Copyright (C) 2006 California Institute of Technology
+ *
+ * Permission is hereby granted, free of charge, to use, copy and modify
+ * this software and its documentation (the "Software") for any
+ * purpose, provided that existing copyright notices are retained in
+ * all copies and that this notice is included verbatim in any distributions
+ * or substantial portions of the Software.
+ * This software is a part of the MonALISA framework (http://monalisa.cacr.caltech.edu).
+ * Users of the Software are asked to feed back problems, benefits,
+ * and/or suggestions about the software to the MonALISA Development Team
+ * (developers@monalisa.cern.ch). Support for this software - fixing of bugs,
+ * incorporation of new features - is done on a best effort basis. All bug
+ * fixes and enhancements will be made available under the same terms and
+ * conditions as the original software,
+
+ * IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY DERIVATIVES THEREOF,
+ * EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ * THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE IS
+ * PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+ * MODIFICATIONS.
+"""
+
+
+import os
+import re
+import time
+import string
+import socket
+from Logger import *
+
+"""
+Class ProcInfo
+extracts information from the proc/ filesystem for system and job monitoring
+"""
+class ProcInfo:
+	# ProcInfo constructor.
+	# logger - a Logger instance used for all diagnostic messages.
+	# Static host information (IP, CPU model, OS, kernel...) is collected
+	# once, here, via readGenericInfo().
+	def __init__ (this, logger):
+		this.DATA = {}; # monitored data that is going to be reported
+		this.LAST_UPDATE_TIME = 0; # when the last measurement was done
+		this.JOBS = {}; # jobs that will be monitored
+		this.logger = logger # use the given logger
+		this.readGenericInfo();
+
+	# This should be called from time to time to update the monitored data,
+	# but not more often than once a second because of the resolution of time()
+	def update (this):
+		if this.LAST_UPDATE_TIME == int(time.time()):
+			this.logger.log(Logger.NOTICE, "ProcInfo: update() called too often!");
+			return;
+		this.readStat();
+		this.readMemInfo();
+		this.readUptimeAndLoadAvg();
+		this.countProcesses();
+		this.readNetworkInfo();
+		this.readNetStat();
+		# refresh per-job data for every monitored pid; readJobInfo() may
+		# remove a terminated job, which is safe here because Python 2's
+		# keys() returns a list copy, not a live view.
+		for pid in this.JOBS.keys():
+			this.readJobInfo(pid);
+			this.readJobDiskUsage(pid);
+		this.LAST_UPDATE_TIME = int(time.time());
+		this.DATA['TIME'] = int(time.time());
+
+	# Call this to add another PID to be monitored.
+	# pid - the process id (also the key its data is stored under)
+	# workDir - the job's working directory, used by readJobDiskUsage()
+	def addJobToMonitor (this, pid, workDir):
+		this.JOBS[pid] = {};
+		this.JOBS[pid]['WORKDIR'] = workDir;
+		this.JOBS[pid]['DATA'] = {};
+		#print this.JOBS;
+
+	# Call this to stop monitoring a PID
+	def removeJobToMonitor (this, pid):
+		if this.JOBS.has_key(pid):
+			del this.JOBS[pid];
+
+	# Return a filtered hash containing the system-related parameters and values
+	# (actually a sorted list of (key, value) tuples - see getFilteredData).
+	def getSystemData (this, params, prevDataRef):
+		return this.getFilteredData(this.DATA, params, prevDataRef);
+
+	# Return a filtered hash containing the job-related parameters and values
+	# for a monitored pid; returns an empty list if the pid is not monitored.
+	def getJobData (this, pid, params):
+		if not this.JOBS.has_key(pid):
+			return [];
+		return this.getFilteredData(this.JOBS[pid]['DATA'], params);
+
+	############################################################################################
+	# internal functions for system monitoring
+	############################################################################################
+
+	# this has to be run twice (with the $lastUpdateTime updated) to get some useful results
+	# the information about blocks_in/out and swap_in/out isn't available for 2.6 kernels (yet)
+	# Values are stored under 'raw_*' keys; rates are derived later in
+	# computeCummulativeParams() from two consecutive samples.
+	def readStat (this):
+		try:
+			FSTAT = open('/proc/stat');
+			line = FSTAT.readline();
+			while(line != ''):
+				if(line.startswith("cpu ")):
+					elem = re.split("\s+", line);
+					this.DATA['raw_cpu_usr'] = float(elem[1]);
+					this.DATA['raw_cpu_nice'] = float(elem[2]);
+					this.DATA['raw_cpu_sys'] = float(elem[3]);
+					this.DATA['raw_cpu_idle'] = float(elem[4]);
+					this.DATA['raw_cpu_iowait'] = float(elem[5]);
+				elif(line.startswith("page")):
+					elem = line.split();
+					this.DATA['raw_blocks_in'] = float(elem[1]);
+					this.DATA['raw_blocks_out'] = float(elem[2]);
+				elif(line.startswith('swap')):
+					elem = line.split();
+					this.DATA['raw_swap_in'] = float(elem[1]);
+					this.DATA['raw_swap_out'] = float(elem[2]);
+				elif(line.startswith('intr')):
+					elem = line.split()
+					this.DATA['raw_interrupts'] = float(elem[1]);
+				elif(line.startswith('ctxt')):
+					elem = line.split()
+					this.DATA['raw_context_switches'] = float(elem[1]);
+				line = FSTAT.readline();
+			FSTAT.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot open /proc/stat");
+			return;
+		try:
+			# blocks_in/out and swap_in/out are moved to /proc/vmstat in >2.5 kernels
+			FVMSTAT = open('/proc/vmstat');
+			line = FVMSTAT.readline();
+			while(line != ''):
+				elem = re.split("\s+", line);
+				if(line.startswith("pgpgin ")):
+					this.DATA['raw_blocks_in'] = float(elem[1]);
+				elif(line.startswith("pgpgout ")):
+					this.DATA['raw_blocks_out'] = float(elem[1]);
+				elif(line.startswith("pswpin ")):
+					this.DATA['raw_swap_in'] = float(elem[1]);
+				elif(line.startswith("pswpout ")):
+					this.DATA['raw_swap_out'] = float(elem[1]);
+				line = FVMSTAT.readline();
+			FVMSTAT.close();
+		except IOError, ex:
+			# only NOTICE: older kernels legitimately lack /proc/vmstat
+			this.logger.log(Logger.NOTICE, "ProcInfo: cannot open /proc/vmstat");
+
+	# sizes are reported in MB (except _usage that is in percent).
+	# /proc/meminfo reports kB, hence the / 1024.0 conversions.
+	def readMemInfo (this):
+		try:
+			FMEM = open('/proc/meminfo');
+			line = FMEM.readline();
+			while(line != ''):
+				elem = re.split("\s+", line);
+				if(line.startswith("MemFree:")):
+					this.DATA['mem_free'] = float(elem[1]) / 1024.0;
+				if(line.startswith("MemTotal:")):
+					this.DATA['total_mem'] = float(elem[1]) / 1024.0;
+				if(line.startswith("SwapFree:")):
+					this.DATA['swap_free'] = float(elem[1]) / 1024.0;
+				if(line.startswith("SwapTotal:")):
+					this.DATA['total_swap'] = float(elem[1]) / 1024.0;
+				if(line.startswith("Buffers:")):
+					this.DATA['mem_buffers'] = float(elem[1]) / 1024.0;
+				if(line.startswith("Cached:")):
+					this.DATA['mem_cached'] = float(elem[1]) / 1024.0;
+				line = FMEM.readline();
+			FMEM.close();
+			# derived values; each guarded so that a partially-parsed
+			# /proc/meminfo never raises a KeyError
+			if this.DATA.has_key('mem_free') and this.DATA.has_key('mem_buffers') and this.DATA.has_key('mem_cached'):
+				this.DATA['mem_actualfree'] = this.DATA['mem_free'] + this.DATA['mem_buffers'] + this.DATA['mem_cached'];
+			if this.DATA.has_key('total_mem') and this.DATA.has_key('mem_actualfree'):
+				this.DATA['mem_used'] = this.DATA['total_mem'] - this.DATA['mem_actualfree'];
+			if this.DATA.has_key('total_swap') and this.DATA.has_key('swap_free'):
+				this.DATA['swap_used'] = this.DATA['total_swap'] - this.DATA['swap_free'];
+			if this.DATA.has_key('mem_used') and this.DATA.has_key('total_mem') and this.DATA['total_mem'] > 0:
+				this.DATA['mem_usage'] = 100.0 * this.DATA['mem_used'] / this.DATA['total_mem'];
+			if this.DATA.has_key('swap_used') and this.DATA.has_key('total_swap') and this.DATA['total_swap'] > 0:
+				this.DATA['swap_usage'] = 100.0 * this.DATA['swap_used'] / this.DATA['total_swap'];
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot open /proc/meminfo");
+			return;
+
+	# read system load average
+	def readLoadAvg (this):
+		try:
+			FAVG = open('/proc/loadavg');
+			line = FAVG.readline();
+			FAVG.close();
+			elem = re.split("\s+", line);
+			this.DATA['load1'] = float(elem[0]);
+			this.DATA['load5'] = float(elem[1]);
+			this.DATA['load15'] = float(elem[2]);
+		except IOError, ex:
+			# NOTE(review): message names /proc/meminfo but the file opened
+			# above is /proc/loadavg - looks like a copy-paste slip; confirm.
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot open /proc/meminfo");
+			return;
+
+
+	# read system load average on Darwin
+	def darwin_readLoadAvg (this):
+		try:
+			LOAD_AVG = os.popen('sysctl vm.loadavg');
+			line = LOAD_AVG.readline();
+			LOAD_AVG.close();
+			elem = re.split("\s+", line);
+			this.DATA['load1'] = float(elem[1]);
+			this.DATA['load5'] = float(elem[2]);
+			this.DATA['load15'] = float(elem[3]);
+		except IOError, ex:
+			# NOTE(review): the message below has an unbalanced quote
+			# ("...loadavg" missing the closing ') - cosmetic only.
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot run 'sysctl vm.loadavg");
+			return;
+
+
+	# read the number of processes currently running on the system
+	def countProcesses (this):
+		"""
+		# old version
+		nr = 0;
+		try:
+			for file in os.listdir("/proc"):
+				if re.match("\d+", file):
+					nr += 1;
+			this.DATA['processes'] = nr;
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot open /proc to count processes");
+			return;
+		"""
+		# new version: count processes per state letter (D/R/S/T/Z) as
+		# reported in the first column of `ps -A -o state`
+		total = 0;
+		states = {'D':0, 'R':0, 'S':0, 'T':0, 'Z':0};
+		try:
+			output = os.popen('ps -A -o state');
+			line = output.readline();
+			while(line != ''):
+				# line[0] also matches the "S" of the "STAT" header row,
+				# which therefore gets counted once - presumably harmless
+				# for a trend metric; verify if exact counts matter.
+				if states.has_key(line[0]):
+					states[line[0]] = states[line[0]] + 1;
+				else:
+					states[line[0]] = 1;
+				total = total + 1;
+				line = output.readline();
+			output.close();
+			this.DATA['processes'] = total;
+			for key in states.keys():
+				this.DATA['processes_'+key] = states[key];
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot get output from ps command");
+			return;
+
+	# reads the IP, hostname, cpu_MHz, uptime
+	# Called once from the constructor; fills host-static values such as
+	# ethX_ip, cpu model/vendor, kernel version, platform and os_type.
+	def readGenericInfo (this):
+		this.DATA['hostname'] = socket.getfqdn();
+		try:
+			# parse ifconfig output; only interfaces named "eth*" are kept
+			output = os.popen('/sbin/ifconfig -a')
+			eth, ip = '', '';
+			line = output.readline();
+			while(line != ''):
+				line = line.strip();
+				if line.startswith("eth"):
+					elem = line.split();
+					eth = elem[0];
+					ip = '';
+				if len(eth) > 0 and line.startswith("inet addr:"):
+					ip = re.match("inet addr:(\d+\.\d+\.\d+\.\d+)", line).group(1);
+					this.DATA[eth + '_ip'] = ip;
+					eth = '';
+				line = output.readline();
+			output.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot get output from /sbin/ifconfig -a");
+
+		try:
+			# one "cpu MHz" line per logical CPU => no_cpus counts them
+			no_cpus = 0;
+			FCPU = open('/proc/cpuinfo');
+			line = FCPU.readline();
+			while(line != ''):
+				if line.startswith("cpu MHz"):
+					this.DATA['cpu_MHz'] = float(re.match("cpu MHz\s+:\s+(\d+\.?\d*)", line).group(1));
+					no_cpus += 1;
+
+				if line.startswith("vendor_id"):
+					this.DATA['cpu_vendor_id'] = re.match("vendor_id\s+:\s+(.+)", line).group(1);
+
+				if line.startswith("cpu family"):
+					this.DATA['cpu_family'] = re.match("cpu family\s+:\s+(.+)", line).group(1);
+
+				if line.startswith("model") and not line.startswith("model name") :
+					this.DATA['cpu_model'] = re.match("model\s+:\s+(.+)", line).group(1);
+
+				if line.startswith("model name"):
+					this.DATA['cpu_model_name'] = re.match("model name\s+:\s+(.+)", line).group(1);
+
+				if line.startswith("bogomips"):
+					this.DATA['bogomips'] = float(re.match("bogomips\s+:\s+(\d+\.?\d*)", line).group(1));
+
+				line = FCPU.readline();
+			FCPU.close();
+			this.DATA['no_CPUs'] = no_cpus;
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot open /proc/cpuinfo");
+
+		# try to determine the kernel version
+		try:
+			output = os.popen('uname -r');
+			line = output.readline().replace("\n", "");
+			this.DATA['kernel_version'] = line;
+			output.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot get kernel version with 'uname -r'");
+
+		# try to determine the platform
+		try:
+			output = os.popen('uname -m 2>/dev/null || uname');
+			line = output.readline().replace("\n", "");
+			this.DATA['platform'] = line
+			output.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot get platform with 'uname -m'");
+
+		# try to determine the OS type: lsb_release first, then a list of
+		# distro-specific release files, then plain `uname -s` as last resort
+		osType = None;
+		try:
+			output = os.popen('env PATH=$PATH:/bin:/usr/bin lsb_release -d 2>/dev/null');
+			line = output.readline().replace("\n", "");
+			mo = re.match("Description:\s*(.*)", line)
+			if not mo is None:
+				osType = mo.group(1);
+			output.close();
+		except IOError, ex:
+			pass
+		# if lsb_release didn't work, try again...
+		if osType is None:
+			for file in ["/etc/redhat-release", "/etc/debian_version", "/etc/SuSE-release", "/etc/slackware-version", "/etc/gentoo-release", "/etc/mandrake-release", "/etc/mandriva-release", "/etc/issue"]:
+				try:
+					REL = open(file);
+					line = REL.readline().replace("\n", "");
+					if len(line) > 0:
+						osType = line;
+						REL.close();
+						break;
+				except IOError, ex:
+					pass
+		# if none of these, try just uname -s
+		if osType is None:
+			try:
+				output = os.popen('uname -s');
+				line = output.readline().replace("\n", "");
+				osType = line;
+				output.close();
+			except IOError, ex:
+				pass
+		if not osType is None:
+			this.DATA['os_type'] = osType;
+		else:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot determine operating system type");
+
+	# read system's uptime and load average. Time is reported as a floating number, in days.
+	# It uses the 'uptime' command which's output looks like these:
+	# 19:55:37 up 11 days, 18:57, 1 user, load average: 0.00, 0.00, 0.00
+	# 18:42:31 up 87 days, 18:10, 9 users, load average: 0.64, 0.84, 0.80
+	# 6:42pm up 7 days 3:08, 7 users, load average: 0.18, 0.14, 0.10
+	# 6:42pm up 33 day(s), 1:54, 1 user, load average: 0.01, 0.00, 0.00
+	# 18:42 up 7 days, 3:45, 2 users, load averages: 1.10 1.11 1.06
+	# 18:47:41 up 7 days, 4:35, 19 users, load average: 0.66, 0.44, 0.41
+	# 11:57am up 2:21, 22 users, load average: 0.59, 0.93, 0.73
+	def readUptimeAndLoadAvg(this):
+		try:
+			output = os.popen('uptime');
+			line = output.readline().replace("\n", "");
+			mo = re.match(".*up\s+((\d+)\s+day[ (s),]+)?(\d+)(:(\d+))?[^\d]+(\d+)[^\d]+([\d\.]+)[^\d]+([\d\.]+)[^\d]+([\d\.]+)", line);
+			if mo is None:
+				this.logger.log(Logger.ERROR, "ProcInfo: got nonparsable output from uptime: "+line);
+				return
+			(days, hour, min, users, load1, load5, load15) = (mo.group(2), mo.group(3), mo.group(5), float(mo.group(6)), mo.group(7), mo.group(8), mo.group(9));
+			if days is None:
+				days = 0.0;
+			if min is None:
+				# "up 2:21" style has no days and only hour:min matched in
+				# group(3); shift it so hour=0, min=<matched value>
+				(min, hour) = (hour, 0.0);
+			uptime = float(days) + float(hour) / 24.0 + float(min) / 1440.0;
+			this.DATA['uptime'] = float(uptime);
+			this.DATA['logged_users'] = float(users); # this is currently not reported
+			this.DATA['load1'] = float(load1);
+			this.DATA['load5'] = float(load5);
+			this.DATA['load15'] = float(load15);
+			output.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot get output from uptime command");
+
+	# do a difference with overflow check and repair
+	# the counter is unsigned 32 or 64 bit
+	# If new < old, the counter is assumed to have wrapped once; the width
+	# (32 vs 64 bit) is guessed from the magnitude of the old value.
+	def diffWithOverflowCheck(this, new, old):
+		if new >= old:
+			return new - old;
+		else:
+			max = (1L << 31) * 2; # 32 bits
+			if old >= max:
+				max = (1L << 63) * 2; # 64 bits
+			return new - old + max;
+
+	# read network information like transfered kBps and nr. of errors on each interface
+	# Parses /proc/net/dev; only "ethN" interfaces are captured. Raw byte
+	# counters go to raw_ethN_in/out, rx+tx error counts to raw_ethN_errs.
+	def readNetworkInfo (this):
+		try:
+			FNET = open('/proc/net/dev');
+			line = FNET.readline();
+			while(line != ''):
+				m = re.match("\s*eth(\d):(\d+)\s+\d+\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+\s+(\d+)\s+\d+\s+(\d+)", line);
+				if m != None:
+					this.DATA['raw_eth'+m.group(1)+'_in'] = float(m.group(2));
+					this.DATA['raw_eth'+m.group(1)+'_out'] = float(m.group(4));
+					this.DATA['raw_eth'+m.group(1)+'_errs'] = int(m.group(3)) + int(m.group(5));
+				line = FNET.readline();
+			FNET.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot open /proc/net/dev");
+			return;
+
+	# run netstat and collect sockets info (tcp, udp, unix) and connection states for tcp sockets from netstat
+	def readNetStat(this):
+		try:
+			output = os.popen('netstat -an 2>/dev/null');
+			sockets = { 'sockets_tcp':0, 'sockets_udp':0, 'sockets_unix':0, 'sockets_icm':0 };
+			tcp_details = { 'sockets_tcp_ESTABLISHED':0, 'sockets_tcp_SYN_SENT':0,
+				'sockets_tcp_SYN_RECV':0, 'sockets_tcp_FIN_WAIT1':0, 'sockets_tcp_FIN_WAIT2':0,
+				'sockets_tcp_TIME_WAIT':0, 'sockets_tcp_CLOSED':0, 'sockets_tcp_CLOSE_WAIT':0,
+				'sockets_tcp_LAST_ACK':0, 'sockets_tcp_LISTEN':0, 'sockets_tcp_CLOSING':0,
+				'sockets_tcp_UNKNOWN':0 };
+			line = output.readline();
+			while(line != ''):
+				arg = string.split(line);
+				proto = arg[0];
+				if proto.find('tcp') == 0:
+					sockets['sockets_tcp'] += 1;
+					# the tcp state is the last column of the netstat line
+					state = arg[len(arg)-1];
+					key = 'sockets_tcp_'+state;
+					if tcp_details.has_key(key):
+						tcp_details[key] += 1;
+				if proto.find('udp') == 0:
+					sockets['sockets_udp'] += 1;
+				if proto.find('unix') == 0:
+					sockets['sockets_unix'] += 1;
+				if proto.find('icm') == 0:
+					sockets['sockets_icm'] += 1;
+
+				line = output.readline();
+			output.close();
+
+			for key in sockets.keys():
+				this.DATA[key] = sockets[key];
+			for key in tcp_details.keys():
+				this.DATA[key] = tcp_details[key];
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot get output from netstat command");
+			return;
+
+	##############################################################################################
+	# job monitoring related functions
+	##############################################################################################
+
+	# internal function that gets the full list of children (pids) for a process (pid)
+	# Returns [parent, child1, child2, ...] (parent included), or [] if the
+	# parent pid is no longer alive - in which case it is also unregistered.
+	def getChildren (this, parent):
+		pidmap = {};
+		try:
+			output = os.popen('ps -A -o "pid ppid"');
+			line = output.readline(); # skip headers
+			line = output.readline();
+			while(line != ''):
+				line = line.strip();
+				elem = re.split("\s+", line);
+				pidmap[int(elem[0])] = int(elem[1]);
+				line = output.readline();
+			output.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot execute ps -A -o \"pid ppid\"");
+
+		if not pidmap.has_key(parent):
+			this.logger.log(Logger.INFO, 'ProcInfo: No job with pid='+str(parent));
+			this.removeJobToMonitor(parent);
+			return [];
+
+		# breadth-first walk of the pid->ppid map: children are appended to
+		# the list while it is being scanned, so grandchildren get picked up
+		children = [parent];
+		i = 0;
+		while(i < len(children)):
+			prnt = children[i];
+			for (pid, ppid) in pidmap.items():
+				if ppid == prnt:
+					children.append(pid);
+			i += 1;
+		return children;
+
+	# internal function that parses a time formatted like "days-hours:min:sec" and returns the corresponding
+	# number of seconds.
+	# Also accepts "hours:min:sec" and "min:sec"; returns None on no match.
+	def parsePSTime (this, my_time):
+		my_time = my_time.strip();
+		m = re.match("(\d+)-(\d+):(\d+):(\d+)", my_time);
+		if m != None:
+			return int(m.group(1)) * 24 * 3600 + int(m.group(2)) * 3600 + int(m.group(3)) * 60 + int(m.group(4));
+		else:
+			m = re.match("(\d+):(\d+):(\d+)", my_time);
+			if(m != None):
+				return int(m.group(1)) * 3600 + int(m.group(2)) * 60 + int(m.group(3));
+			else:
+				m = re.match("(\d+):(\d+)", my_time);
+				if(m != None):
+					return int(m.group(1)) * 60 + int(m.group(2));
+				else:
+					return None;
+
+	# read information about this the JOB_PID process
+	# memory sizes are given in KB
+	# Aggregates ps output over the job's whole process tree: run_time is the
+	# max etime, cpu_time/%cpu are summed, and memory figures are summed only
+	# once per distinct (pmem, rsz, vsz, comm) footprint to avoid counting
+	# threads of the same process several times.
+	def readJobInfo (this, pid):
+		if (pid == '') or not this.JOBS.has_key(pid):
+			return;
+		children = this.getChildren(pid);
+		if(len(children) == 0):
+			this.logger.log(Logger.INFO, "ProcInfo: Job with pid="+str(pid)+" terminated; removing it from monitored jobs.");
+			#print ":("
+			this.removeJobToMonitor(pid);
+			return;
+		try:
+			# `child` is Python 2 backquote repr(), i.e. the pid as a string
+			JSTATUS = os.popen("ps --no-headers --pid " + ",".join([`child` for child in children]) + " -o pid,etime,time,%cpu,%mem,rsz,vsz,comm");
+			mem_cmd_map = {};
+			etime, cputime, pcpu, pmem, rsz, vsz, comm, fd = None, None, None, None, None, None, None, None;
+			line = JSTATUS.readline();
+			while(line != ''):
+				line = line.strip();
+				m = re.match("(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(.+)", line);
+				if m != None:
+					apid, etime1, cputime1, pcpu1, pmem1, rsz1, vsz1, comm1 = m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(6), m.group(7), m.group(8);
+					sec = this.parsePSTime(etime1);
+					if (not sec is None) and (sec > this.addIfValid(etime, 0)): # the elapsed time is the maximum of all elapsed
+						etime = sec;
+					sec = this.parsePSTime(cputime1); # times corresponding to all child processes.
+					cputime = this.addIfValid(cputime, sec); # total cputime is the sum of cputimes for all processes.
+					pcpu = this.addIfValid(pcpu, float(pcpu1)); # total %cpu is the sum of all children %cpu.
+					if not mem_cmd_map.has_key(`pmem1`+" "+`rsz1`+" "+`vsz1`+" "+`comm1`):
+						# it's the first thread/process with this memory footprint; add it.
+						mem_cmd_map[`pmem1`+" "+`rsz1`+" "+`vsz1`+" "+`comm1`] = 1;
+						pmem = this.addIfValid(pmem, float(pmem1));
+						rsz = this.addIfValid(rsz, int(rsz1));
+						vsz = this.addIfValid(vsz, int(vsz1));
+						fd = this.addIfValid(fd, this.countOpenFD(apid));
+					# else not adding memory usage
+				line = JSTATUS.readline();
+			JSTATUS.close();
+			if not etime is None:
+				this.JOBS[pid]['DATA']['run_time'] = etime;
+			if not cputime is None:
+				this.JOBS[pid]['DATA']['cpu_time'] = cputime;
+			if not pcpu is None:
+				this.JOBS[pid]['DATA']['cpu_usage'] = pcpu;
+			if not pmem is None:
+				this.JOBS[pid]['DATA']['mem_usage'] = pmem;
+			if not rsz is None:
+				this.JOBS[pid]['DATA']['rss'] = rsz;
+			if not vsz is None:
+				this.JOBS[pid]['DATA']['virtualmem'] = vsz;
+			if not fd is None:
+				this.JOBS[pid]['DATA']['open_files'] = fd;
+		except IOError, ex:
+			# NOTE(review): message mentions `-eo "pid ppid"` but the command
+			# actually run above is `--pid ... -o pid,etime,...`; confirm.
+			this.logger.log(Logger.ERROR, "ProcInfo: cannot execute ps --no-headers -eo \"pid ppid\"");
+
+	# return the sum of the two given parameters (or None if the case)
+	# None acts as "no value yet": None+x == x, None+None == None.
+	def addIfValid (this, sum, toAdd):
+		if toAdd is None:
+			return sum
+		if sum is None:
+			return toAdd
+		return sum + toAdd;
+
+	# count the number of open files for the given pid
+	# Returns the number of entries in /proc/<pid>/fd, or None on failure.
+	def countOpenFD (this, pid):
+		dir = '/proc/'+str(pid)+'/fd';
+		if os.access(dir, os.F_OK):
+			if os.access(dir, os.X_OK):
+				list = os.listdir(dir);
+				open_files = len(list);
+				if pid == os.getpid():
+					# when counting our own process, discount 2 descriptors -
+					# presumably the ones used by this very measurement; confirm.
+					open_files -= 2;
+				this.logger.log(Logger.DEBUG, "Counting open_files for "+ `pid` +": "+ str(len(list)) +" => " + `open_files` + " open_files");
+				return open_files;
+			else:
+				this.logger.log(Logger.ERROR, "ProcInfo: cannot count the number of opened files for job "+`pid`);
+		else:
+			this.logger.log(Logger.ERROR, "ProcInfo: job "+`pid`+" dosen't exist");
+		# failed
+		return None;
+
+	# if there is an work directory defined, then compute the used space in that directory
+	# and the free disk space on the partition to which that directory belongs
+	# sizes are given in MB
+	# NOTE(review): both log() calls below pass an extra "ERROR" argument
+	# before the message, unlike every other call in this class; also
+	# disk_usage is a percentage from df yet is divided by 1024 like the
+	# size fields, and DF.close() is only reached when the regex matched.
+	def readJobDiskUsage (this, pid):
+		if (pid == '') or not this.JOBS.has_key(pid):
+			return;
+		workDir = this.JOBS[pid]['WORKDIR'];
+		if workDir == '':
+			return;
+		try:
+			DU = os.popen("du -Lsck " + workDir + " | tail -1 | cut -f 1");
+			line = DU.readline();
+			this.JOBS[pid]['DATA']['workdir_size'] = int(line) / 1024.0;
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ERROR", "ProcInfo: cannot run du to get job's disk usage for job "+`pid`);
+		try:
+			DF = os.popen("df -k "+workDir+" | tail -1");
+			line = DF.readline().strip();
+			m = re.match("\S+\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)%", line);
+			if m != None:
+				this.JOBS[pid]['DATA']['disk_total'] = float(m.group(1)) / 1024.0;
+				this.JOBS[pid]['DATA']['disk_used'] = float(m.group(2)) / 1024.0;
+				this.JOBS[pid]['DATA']['disk_free'] = float(m.group(3)) / 1024.0;
+				this.JOBS[pid]['DATA']['disk_usage'] = float(m.group(4)) / 1024.0;
+				DF.close();
+		except IOError, ex:
+			this.logger.log(Logger.ERROR, "ERROR", "ProcInfo: cannot run df to get job's disk usage for job "+`pid`);
+
+	# create cumulative parameters based on raw params like cpu_, blocks_, swap_, or ethX_
+	# On the first call (prevDataRef empty) it only seeds prevDataRef with the
+	# current raw_* values and returns; rates can be computed from the second
+	# call on.
+	# NOTE(review): the bare `del dataRef[param]` / `del dataRef[param+'_R']`
+	# statements raise KeyError when the key was never created (e.g. zero
+	# interval right after seeding) - confirm whether callers tolerate this.
+	def computeCummulativeParams(this, dataRef, prevDataRef):
+		if prevDataRef == {}:
+			for key in dataRef.keys():
+				if key.find('raw_') == 0:
+					prevDataRef[key] = dataRef[key];
+			prevDataRef['TIME'] = dataRef['TIME'];
+			return;
+
+		# cpu -related params
+		if (dataRef.has_key('raw_cpu_usr')) and (prevDataRef.has_key('raw_cpu_usr')):
+			diff={};
+			cpu_sum = 0;
+			for param in ['cpu_usr', 'cpu_nice', 'cpu_sys', 'cpu_idle', 'cpu_iowait']:
+				diff[param] = this.diffWithOverflowCheck(dataRef['raw_'+param], prevDataRef['raw_'+param]);
+				cpu_sum += diff[param];
+			for param in ['cpu_usr', 'cpu_nice', 'cpu_sys', 'cpu_idle', 'cpu_iowait']:
+				if cpu_sum != 0:
+					dataRef[param] = 100.0 * diff[param] / cpu_sum;
+				else:
+					del dataRef[param];
+			if cpu_sum != 0:
+				dataRef['cpu_usage'] = 100.0 * (cpu_sum - diff['cpu_idle']) / cpu_sum;
+			else:
+				del dataRef['cpu_usage'];
+			# add the other parameters
+			for param in ['interrupts', 'context_switches']:
+				if(prevDataRef.has_key('raw_'+param) and dataRef.has_key('raw_'+param)):
+					dataRef[param] = this.diffWithOverflowCheck(dataRef['raw_'+param], prevDataRef['raw_'+param]);
+				else:
+					del dataRef[param];
+
+		# swap, blocks, context switches, interrupts
+		interval = dataRef['TIME'] - prevDataRef['TIME'];
+		for param in ['blocks_in', 'blocks_out', 'swap_in', 'swap_out', 'interrupts', 'context_switches']:
+			if((interval != 0) and prevDataRef.has_key('raw_'+param) and dataRef.has_key('raw_'+param)):
+				diff = this.diffWithOverflowCheck(dataRef['raw_'+param], prevDataRef['raw_'+param]);
+				dataRef[param+'_R'] = diff / interval;
+			else:
+				del dataRef[param+'_R'];
+
+		# eth - related params
+		for rawParam in dataRef.keys():
+			if (rawParam.find('raw_eth') == 0) and prevDataRef.has_key(rawParam):
+				param = rawParam.split('raw_')[1];
+				if interval != 0:
+					dataRef[param] = this.diffWithOverflowCheck(dataRef[rawParam], prevDataRef[rawParam]); # absolute difference
+					if param.find('_errs') == -1:
+						dataRef[param] = dataRef[param] / interval / 1024.0; # if it's _in or _out, compute in KB/sec
+				else:
+					del dataRef[param];
+
+		# copy contents of the current data values to the previous-data hash
+		for param in dataRef.keys():
+			if param.find('raw_') == 0:
+				prevDataRef[param] = dataRef[param];
+		prevDataRef['TIME'] = dataRef['TIME'];
+
+
+	# Return a hash containing (param,value) pairs with existing values from the requested ones
+	# Actually returns a list of (key, value) tuples sorted by key. If
+	# prevDataHash is given (not None), cumulative/rate params are derived
+	# first via computeCummulativeParams().
+	def getFilteredData (this, dataHash, paramsList, prevDataHash = None):
+
+		if not prevDataHash is None:
+			this.computeCummulativeParams(dataHash, prevDataHash);
+
+		result = {};
+		for param in paramsList:
+			if param == 'net_sockets':
+				for key in dataHash.keys():
+					if key.find('sockets') == 0 and key.find('sockets_tcp_') == -1:
+						result[key] = dataHash[key];
+			elif param == 'net_tcp_details':
+				for key in dataHash.keys():
+					if key.find('sockets_tcp_') == 0:
+						result[key] = dataHash[key];
+
+			# net_in/net_out/net_errs and 'ip' expand to one entry per ethX
+			m = re.match("^net_(.*)$", param);
+			if m == None:
+				m = re.match("^(ip)$", param);
+			if m != None:
+				net_param = m.group(1);
+				#this.logger.log(Logger.DEBUG, "Querying param "+net_param);
+				for key, value in dataHash.items():
+					m = re.match("eth\d_"+net_param, key);
+					if m != None:
+						result[key] = value;
+			else:
+				if param == 'processes':
+					for key in dataHash.keys():
+						if key.find('processes') == 0:
+							result[key] = dataHash[key];
+				elif dataHash.has_key(param):
+					result[param] = dataHash[param];
+		sorted_result = [];
+		keys = result.keys();
+		keys.sort();
+		for key in keys:
+			sorted_result.append((key, result[key]));
+		return sorted_result;
+
+######################################################################################
+# self test
+
+if __name__ == '__main__':
+	# "from Logger import *" binds the Logger class itself, so instantiate it
+	# directly; Logger.Logger(...) would be an attribute lookup on the class.
+	logger = Logger(Logger.DEBUG);
+	pi = ProcInfo(logger);
+
+	print "first update";
+	pi.update();
+	print "Sleeping to accumulate";
+	time.sleep(1);
+	pi.update();
+
+	print "System Monitoring:";
+	sys_cpu_params = ['cpu_usr', 'cpu_sys', 'cpu_idle', 'cpu_nice', 'cpu_usage', 'context_switches', 'interrupts'];
+	sys_io_params = ['blocks_in', 'blocks_out', 'swap_in', 'swap_out'];
+	sys_mem_params = ['mem_used', 'mem_free', 'total_mem', 'mem_usage'];
+	sys_swap_params = ['swap_used', 'swap_free', 'total_swap', 'swap_usage'];
+	sys_load_params = ['load1', 'load5', 'load15', 'processes', 'uptime'];
+	sys_gen_params = ['hostname', 'cpu_MHz', 'no_CPUs', 'cpu_vendor_id', 'cpu_family', 'cpu_model', 'cpu_model_name', 'bogomips'];
+	sys_net_params = ['net_in', 'net_out', 'net_errs', 'ip'];
+	sys_net_stat = ['sockets_tcp', 'sockets_udp', 'sockets_unix', 'sockets_icm'];
+	sys_tcp_details = ['sockets_tcp_ESTABLISHED', 'sockets_tcp_SYN_SENT', 'sockets_tcp_SYN_RECV', 'sockets_tcp_FIN_WAIT1', 'sockets_tcp_FIN_WAIT2', 'sockets_tcp_TIME_WAIT', 'sockets_tcp_CLOSED', 'sockets_tcp_CLOSE_WAIT', 'sockets_tcp_LAST_ACK', 'sockets_tcp_LISTEN', 'sockets_tcp_CLOSING', 'sockets_tcp_UNKNOWN'];
+
+	# getSystemData() requires a previous-data hash as its second argument
+	# (there is no default). Passing None skips the cumulative/rate
+	# computation, which needs two samples with a non-zero time interval.
+	print "sys_cpu_params", pi.getSystemData(sys_cpu_params, None);
+	print "sys_io_params", pi.getSystemData(sys_io_params, None);
+	print "sys_mem_params", pi.getSystemData(sys_mem_params, None);
+	print "sys_swap_params", pi.getSystemData(sys_swap_params, None);
+	print "sys_load_params", pi.getSystemData(sys_load_params, None);
+	print "sys_gen_params", pi.getSystemData(sys_gen_params, None);
+	print "sys_net_params", pi.getSystemData(sys_net_params, None);
+	print "sys_net_stat", pi.getSystemData(sys_net_stat, None);
+	print "sys_tcp_details", pi.getSystemData(sys_tcp_details, None);
+
+	job_pid = os.getpid();
+
+	# monitor this very process as a "job"
+	print "Job (myself) monitoring:";
+	pi.addJobToMonitor(job_pid, os.getcwd());
+	print "Sleep another second";
+	time.sleep(1);
+	pi.update();
+
+	job_cpu_params = ['run_time', 'cpu_time', 'cpu_usage'];
+	job_mem_params = ['mem_usage', 'rss', 'virtualmem', 'open_files'];
+	job_disk_params = ['workdir_size', 'disk_used', 'disk_free', 'disk_total', 'disk_usage'];
+	time.sleep(10);
+	print "job_cpu_params", pi.getJobData(job_pid, job_cpu_params);
+	print "job_mem_params", pi.getJobData(job_pid, job_mem_params);
+	print "job_disk_params", pi.getJobData(job_pid, job_disk_params);
+
+	pi.removeJobToMonitor(os.getpid());
--- /dev/null
+
+"""
+ * ApMon - Application Monitoring Tool
+ * Version: 2.2.13
+ *
+ * Copyright (C) 2006 California Institute of Technology
+ *
+ * Permission is hereby granted, free of charge, to use, copy and modify
+ * this software and its documentation (the "Software") for any
+ * purpose, provided that existing copyright notices are retained in
+ * all copies and that this notice is included verbatim in any distributions
+ * or substantial portions of the Software.
+ * This software is a part of the MonALISA framework (http://monalisa.cacr.caltech.edu).
+ * Users of the Software are asked to feed back problems, benefits,
+ * and/or suggestions about the software to the MonALISA Development Team
+ * (developers@monalisa.cern.ch). Support for this software - fixing of bugs,
+ * incorporation of new features - is done on a best effort basis. All bug
+ * fixes and enhancements will be made available under the same terms and
+ * conditions as the original software,
+
+ * IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY DERIVATIVES THEREOF,
+ * EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ * THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE IS
+ * PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+ * MODIFICATIONS.
+"""
+
+"""
+apmon.py
+
+This is a python implementation for the ApMon API for sending
+data to the MonALISA service.
+
+For further details about ApMon please see the C/C++ or Java documentation
+You can find a sample usage of this module in apmTest.py.
+
+Note that the parameters must be either integers(32 bits) or doubles(64 bits).
+Sending strings is supported, but they will not be stored in the
+farm's store nor shown in the farm's window in the MonALISA client.
+"""
+
+import re
+import xdrlib
+import socket
+import struct
+import StringIO
+import threading
+import time
+from Logger import *
+import ProcInfo
+import random
+import copy
+import os
+
+#__all__ = ["ApMon"]
+
+#__debug = False # set this to True to be verbose
+
+class ApMon:
+ """
+ Main class for sending monitoring data to a MonaLisa module.
+ One or more destinations can be chosen for the data. See constructor.
+
+ The data is packed in UDP datagrams, using XDR. The following fields are sent:
+ - version & password (string)
+ - cluster name (string)
+ - node name (string)
+ - number of parameters (int)
+ - for each parameter:
+ - name (string)
+ - value type (int)
+ - value
+ - optionally a (int) with the given timestamp
+
+ Attributes (public):
+ - destinations - a list containing (ip, port, password) tuples
+ - configAddresses - list with files and urls from where the config is read
+ - configRecheckInterval - period, in seconds, to check for changes
+ in the configAddresses list
+ - configRecheck - boolean - whether to recheck periodically for changes
+ in the configAddresses list
+ """
+
+ __defaultOptions = {
+ 'job_monitoring': True, # perform (or not) job monitoring
+ 'job_interval' : 120, # at this interval (in seconds)
+ 'job_data_sent' : 0, # time from Epoch when job information was sent; don't touch!
+
+ 'job_cpu_time' : True, # elapsed time from the start of this job in seconds
+ 'job_run_time' : True, # processor time spent running this job in seconds
+ 'job_cpu_usage' : True, # current percent of the processor used for this job, as reported by ps
+	'job_virtualmem': True, # size in KB of the virtual memory occupied by the job, as reported by ps
+ 'job_rss' : True, # size in KB of the resident image size of the job, as reported by ps
+ 'job_mem_usage' : True, # percent of the memory occupied by the job, as reported by ps
+ 'job_workdir_size': True, # size in MB of the working directory of the job
+ 'job_disk_total': True, # size in MB of the total size of the disk partition containing the working directory
+ 'job_disk_used' : True, # size in MB of the used disk partition containing the working directory
+ 'job_disk_free' : True, # size in MB of the free disk partition containing the working directory
+ 'job_disk_usage': True, # percent of the used disk partition containing the working directory
+ 'job_open_files': True, # number of open file descriptors
+
+ 'sys_monitoring': True, # perform (or not) system monitoring
+ 'sys_interval' : 120, # at this interval (in seconds)
+ 'sys_data_sent' : 0, # time from Epoch when system information was sent; don't touch!
+
+ 'sys_cpu_usr' : True, # cpu-usage information
+	'sys_cpu_sys' : True, # all these will produce corresponding params without "sys_"
+ 'sys_cpu_nice' : True,
+ 'sys_cpu_idle' : True,
+ 'sys_cpu_iowait': True,
+ 'sys_cpu_usage' : True,
+ 'sys_interrupts_R': True,
+ 'sys_context_switches_R' : True,
+ 'sys_load1' : True, # system load information
+ 'sys_load5' : True,
+ 'sys_load15' : True,
+ 'sys_mem_used' : True, # memory usage information
+ 'sys_mem_free' : True,
+ 'sys_mem_actualfree': True, # actually free amount of mem: free + buffers + cached
+ 'sys_mem_usage' : True,
+ 'sys_mem_buffers':True,
+ 'sys_mem_cached': True,
+ 'sys_blocks_in_R' : True,
+ 'sys_blocks_out_R': True,
+ 'sys_swap_used' : True, # swap usage information
+ 'sys_swap_free' : True,
+ 'sys_swap_usage': True,
+ 'sys_swap_in_R' : True,
+ 'sys_swap_out_R' : True,
+ 'sys_net_in' : True, # network transfer in kBps
+ 'sys_net_out' : True, # these will produce params called ethX_in, ethX_out, ethX_errs
+ 'sys_net_errs' : True, # for each eth interface
+ 'sys_net_sockets' : True, # number of opened sockets for each proto => sockets_tcp/udp/unix ...
+ 'sys_net_tcp_details' : True, # number of tcp sockets in each state => sockets_tcp_LISTEN, ...
+ 'sys_processes' : True,
+ 'sys_uptime' : True, # uptime of the machine, in days (float number)
+
+ 'general_info' : True, # send (or not) general host information once every 2 x $sys_interval seconds
+ 'general_data_sent': 0, # time from Epoch when general information was sent; don't touch!
+
+ 'hostname' : True,
+ 'ip' : True, # will produce ethX_ip params for each interface
+ 'cpu_MHz' : True,
+ 'no_CPUs' : True, # number of CPUs
+ 'kernel_version': True,
+ 'platform' : True,
+ 'os_type' : True,
+ 'total_mem' : True,
+ 'total_swap' : True,
+ 'cpu_vendor_id' : True,
+ 'cpu_family' : True,
+ 'cpu_model' : True,
+ 'cpu_model_name': True,
+ 'bogomips' : True};
+
    def __init__ (self, initValue, defaultLogLevel = Logger.INFO):
        """
        Class constructor:
        - if initValue is a string, put it in configAddresses and load destinations
          from the file named like that. if it starts with "http://", the configuration
          is loaded from that URL. For background monitoring, given parameters will overwrite defaults

        - if initValue is a list, put its contents in configAddresses and create
          the list of destinations from all those sources. For background monitoring,
          given parameters will overwrite defaults (see __defaultOptions)

        - if initValue is a tuple (of strings), initialize destinations with that values.
          Strings in this tuple have this form: "{hostname|ip}[:port][ passwd]", the
          default port being 8884 and the default password being "". Background monitoring will be
          enabled sending the parameters active from __defaultOptions (see end of file)

        - if initValue is a hash (key = string(hostname|ip[:port][ passwd]),
          val = hash{'param_name': True/False, ...}) the given options for each destination
          will overwrite the default parameters (see __defaultOptions)
        """
        # key = tuple (host, port, pass); val = hash {"param_name" : True/False, ...}
        self.destinations = {}
        # previous raw data per destination, used by getSystemData to compute rates
        self.destPrevData = {}
        # key = tuple (host, port, pass); val = hash {'INSTANCE_ID', 'SEQ_NR'}
        self.senderRef = {}
        self.configAddresses = [] # empty, by default; list of files/urls from where we read config
        self.configRecheckInterval = 600 # 10 minutes
        self.configRecheck = True # enabled by default
        self.performBgMonitoring = True # by default, perform background monitoring
        self.monitoredJobs = {} # monitored jobs; key = pid; value = hash with CLUSTER_NAME / NODE_NAME
        self.maxMsgRate = 10 # Maximum number of messages allowed to be sent per second
        self.maxMsgSize = 1440 # Maximum size of a message. Bulk parameters are split in several messages of smaller size
        self.__defaultSenderRef = {'INSTANCE_ID': self.__getInstanceID(), 'SEQ_NR': 0};
        self.__defaultUserCluster = "ApMon_UserSend";
        self.__defaultUserNode = socket.getfqdn();
        self.__defaultSysMonCluster = "ApMon_SysMon";
        self.__defaultSysMonNode = socket.getfqdn();
        # don't touch these:
        self.__freed = False
        self.__udpSocket = None
        self.__configUpdateLock = threading.Lock()
        self.__configUpdateEvent = threading.Event()
        self.__configUpdateFinished = threading.Event()
        self.__bgMonitorLock = threading.Lock()
        self.__bgMonitorEvent = threading.Event()
        self.__bgMonitorFinished = threading.Event()
        # don't allow a user to send more than MAX_MSG messages per second, in average
        self.__crtTime = 0;
        self.__prvTime = 0;
        self.__prvSent = 0;
        self.__prvDrop = 0;
        self.__crtSent = 0;
        self.__crtDrop = 0;
        self.__hWeight = 0.92;  # exponential-decay weight used by __shouldSend
        self.logger = Logger(defaultLogLevel)
        try:
            self.setDestinations(initValue)
            self.__udpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            if len(self.configAddresses) > 0:
                # if there are addresses that need to be monitored,
                # start config checking and reloading thread
                th = threading.Thread(target=self.__configLoader)
                th.setDaemon(True) # this is a daemon thread
                th.start()
            # create the ProcInfo instance
            self.procInfo = ProcInfo.ProcInfo(self.logger);
            # self.procInfo.update();
            # start the background monitoring thread
            th = threading.Thread(target=self.__bgMonitor);
            th.setDaemon(True);
            th.start();
        except Exception, msg:
            self.logger.log(Logger.ERROR, "Error initializing ApMon "+str(msg), True)
+
+ def sendParams (self, params):
+ """
+ Send multiple parameters to MonALISA, with default (last given) cluser and node names.
+ """
+ self.sendTimedParams (-1, params)
+
+ def sendTimedParams (self, timeStamp, params):
+ """
+ Send multiple parameters, specifying the time for them, with default (last given) cluster and node names.
+ (See sendTimedParameters for more details)
+ """
+ self.sendTimedParameters (None, None, timeStamp, params);
+
+ def sendParameter (self, clusterName, nodeName, paramName, paramValue):
+ """
+ Send a single parameter to MonALISA.
+ """
+ self.sendTimedParameter(clusterName, nodeName, -1, paramName, paramValue);
+
+ def sendTimedParameter (self, clusterName, nodeName, timeStamp, paramName, paramValue):
+ """
+ Send a single parameter, with a given time.
+ """
+ self.sendTimedParameters (clusterName, nodeName, timeStamp, {paramName:paramValue})
+
+ def sendParameters (self, clusterName, nodeName, params):
+ """
+ Send multiple parameters specifying cluster and node name for them
+ """
+ self.sendTimedParameters (clusterName, nodeName, -1, params);
+
+ def sendTimedParameters (self, clusterName, nodeName, timeStamp, params):
+ """
+ Send multiple monitored parameters to MonALISA.
+
+ - clusterName is the name of the cluster being monitored. The first
+ time this function is called, this paramenter must not be None. Then,
+ it can be None; last given clusterName will be used instead.
+ - nodeName is the name of the node for which are the parameters. If this
+ is None, the full hostname of this machine will be sent instead.
+ - timeStamp, if > 0, is given time for the parameters. This is in seconds from Epoch.
+ Note that this option should be used only if you are sure about the time for the result.
+ Otherwize, the parameters will be assigned a correct time (obtained from NTP servers)
+ in MonALISA service. This option can be usefull when parsing logs, for example.
+ - params is a dictionary containing pairs with:
+ - key: parameter name
+ - value: parameter value, either int or float.
+ or params is a vector of tuples (key, value). This version can be used
+ in case you want to send the parameters in a given order.
+
+ NOTE that python doesn't know about 32-bit floats (only 64-bit floats!)
+ """
+ try:
+ if (clusterName == None) or (clusterName == ""):
+ clusterName = self.__defaultUserCluster
+ else:
+ self.__defaultUserCluster = clusterName
+ if nodeName == None:
+ nodeName = self.__defaultUserNode
+ else:
+ self.__defaultUserNode = nodeName
+ if len(self.destinations) == 0:
+ self.logger.log(Logger.WARNING, "Not sending parameters since no destination is defined.");
+ return
+ self.__configUpdateLock.acquire();
+ for dest in self.destinations.keys():
+ self.__directSendParams(dest, clusterName, nodeName, timeStamp, params);
+ self.__configUpdateLock.release();
+ except Exception, msg:
+ self.logger.log(Logger.ERROR, "Error in sendTimedParameters: "+str(msg), True)
+
+
+ def addJobToMonitor (self, pid, workDir, clusterName, nodeName):
+ """
+ Add a new job to monitor.
+ """
+ try:
+ pid = int(pid)
+ except Exception, msg:
+ self.logger.log(Logger.ERROR, "Job's PID should be a number: "+str(msg), True)
+ try:
+ self.__bgMonitorLock.acquire();
+ self.monitoredJobs[pid] = {};
+ self.monitoredJobs[pid]['CLUSTER_NAME'] = clusterName;
+ self.monitoredJobs[pid]['NODE_NAME'] = nodeName;
+ self.procInfo.addJobToMonitor(pid, workDir);
+ self.__bgMonitorLock.release();
+ except Exception, msg:
+ self.logger.log(Logger.ERROR, "Error in addJobToMonitor: "+str(msg), True)
+
+ def removeJobToMonitor (self, pid):
+ """
+ Remove a job from being monitored.
+ """
+ try:
+ self.__bgMonitorLock.acquire();
+ self.procInfo.removeJobToMonitor(pid);
+ if self.monitoredJobs.has_key(pid):
+ del self.monitoredJobs[pid];
+ else:
+ self.logger.log(Logger.ERROR, "Asked to stop monitoring job that is not monitored; given pid="+str(pid), False)
+ self.__bgMonitorLock.release();
+ except Exception, msg:
+ self.logger.log(Logger.ERROR, "Error in removeJobToMonitor: "+str(msg), True)
+
+ def setMonitorClusterNode (self, clusterName, nodeName):
+ """
+ Set the cluster and node names where to send system related information.
+ """
+ self.__bgMonitorLock.acquire();
+ if (clusterName != None) and (clusterName != ""):
+ self.__defaultSysMonCluster = clusterName;
+ if (nodeName != None) and (nodeName != ""):
+ self.__defaultSysMonNode = nodeName;
+ self.__bgMonitorLock.release();
+
+ def enableBgMonitoring (self, onOff):
+ """
+ Enable or disable background monitoring. Note that background monitoring information
+ can still be sent if user calls the sendBgMonitoring method.
+ """
+ self.performBgMonitoring = onOff;
+
+ def sendBgMonitoring (self, mustSend = False):
+ """
+ Send background monitoring about system and jobs to all interested destinations.
+ If mustSend == True, the information is sent regardles of the elapsed time since last sent
+ If mustSend == False, the data is sent only if the required interval has passed since last sent
+ """
+ try:
+ if len(self.destinations) == 0:
+ self.logger.log(Logger.WARNING, "Not sending bg monitoring info since no destination is defined.");
+ return
+ self.__bgMonitorLock.acquire();
+ now = int(time.time());
+ updatedProcInfo = False;
+ for destination, options in self.destinations.iteritems():
+ sysParams = [];
+ jobParams = [];
+ prevRawData = self.destPrevData[destination];
+ # for each destination and its options, check if we have to report any background monitoring data
+ if(options['sys_monitoring'] and (mustSend or options['sys_data_sent'] + options['sys_interval'] <= now)):
+ for param, active in options.items():
+ m = re.match("sys_(.+)", param);
+ if(m != None and active):
+ param = m.group(1);
+ if not (param == 'monitoring' or param == 'interval' or param == 'data_sent'):
+ sysParams.append(param)
+ options['sys_data_sent'] = now;
+ if(options['job_monitoring'] and (mustSend or options['job_data_sent'] + options['job_interval'] <= now)):
+ for param, active in options.items():
+ m = re.match("job_(.+)", param);
+ if(m != None and active):
+ param = m.group(1);
+ if not (param == 'monitoring' or param == 'interval' or param == 'data_sent'):
+ jobParams.append(param);
+ options['job_data_sent'] = now;
+ if(options['general_info'] and (mustSend or options['general_data_sent'] + 2 * int(options['sys_interval']) <= now)):
+ for param, active in options.items():
+ if not (param.startswith("sys_") or param.startswith("job_")) and active:
+ if not (param == 'general_info' or param == 'general_data_sent'):
+ sysParams.append(param);
+ options['general_data_sent'] = now;
+
+ if (not updatedProcInfo) and (((len(sysParams) > 0) or (len(jobParams) > 0))):
+ self.procInfo.update();
+ updatedProcInfo = True;
+
+ sysResults = []
+ if(len(sysParams) > 0):
+ sysResults = self.procInfo.getSystemData(sysParams, prevRawData)
+ if(len(sysResults) > 0):
+ self.__directSendParams(destination, self.__defaultSysMonCluster, self.__defaultSysMonNode, -1, sysResults)
+ for pid, props in self.monitoredJobs.items():
+ jobResults = []
+ if(len(jobParams) > 0):
+ jobResults = self.procInfo.getJobData(pid, jobParams)
+ if(len(jobResults) > 0):
+ self.__directSendParams(destination, props['CLUSTER_NAME'], props['NODE_NAME'], -1, jobResults)
+ self.__bgMonitorLock.release();
+ except Exception, msg:
+ self.logger.log(Logger.ERROR, "Error in sendBgMonitoring: "+str(msg), True)
+
+ def setDestinations(self, initValue):
+ """
+ Set the destinations of the ApMon instance. It accepts the same parameters as the constructor.
+ """
+ try:
+ if type(initValue) == type("string"):
+ self.configAddresses = [initValue]
+ self.configRecheck = True
+ self.configRecheckInterval = 600
+ self.__reloadAddresses()
+ elif type(initValue) == type([]):
+ self.configAddresses = initValue
+ self.configRecheck = True
+ self.configRecheckInterval = 600
+ self.__reloadAddresses()
+ elif type(initValue) == type(()):
+ self.configAddresses = []
+ for dest in initValue:
+ self.__addDestination (dest, self.destinations)
+ self.configRecheck = False
+ elif type(initValue) == type({}):
+ self.configAddresses = []
+ for dest, opts in initValue.items():
+ self.__addDestination (dest, self.destinations, opts)
+ self.configRecheck = False
+ except Exception, msg:
+ self.logger.log(Logger.ERROR, "Error in setDestinations: "+str(msg), True)
+
+ def getConfig(self):
+ """
+ Returns a multi-line string that contains the configuration of ApMon. This string can
+ be passed to the setDestination method (or to the constructor). It has the same
+ structure as the config file/url contents.
+ """
+ conf = ""
+ for dest, opts in self.destinations.items():
+ h, p, w = dest
+ conf += h+":"+str(p)+" "+w+"\n\n"
+ ok = opts.keys()
+ ok.sort();
+ for o in ok:
+ conf += "xApMon_"+o+" = "+str(opts[o])+"\n"
+ conf += "TODO: add the others \n"
+ return conf
+
    def initializedOK(self):
        """
        Return True if there is at least one destination configured to which
        parameters can be sent, False otherwise.
        """
        return len(self.destinations) > 0
+
+ def setLogLevel(self, strLevel):
+ """
+ Change the log level. Given level is a string, one of 'FATAL', 'ERROR', 'WARNING',
+ 'INFO', 'NOTICE', 'DEBUG'.
+ """
+ self.logger.setLogLevel(strLevel);
+
+ def setMaxMsgRate(self, rate):
+ """
+ Set the maximum number of messages that can be sent, per second.
+ """
+ self.maxMsgRate = rate;
+ self.logger.log(Logger.DEBUG, "Setting maxMsgRate to: " + str(rate));
+
+ def setMaxMsgSize(self, size):
+ """
+ Set the maximum size of the sent messages. ApMon will try to split in several independent
+ messages parameters sent in bulk, if the size would be larger than this
+ """
+ self.maxMsgSize = size
+ self.logger.log(Logger.DEBUG, "Setting maxMsgSize to: %d" % size)
+
    def free(self):
        """
        Stop background threads, close opened sockets. You have to use this function if you want to
        free all the resources that ApMon takes, and allow it to be garbage-collected.
        """
        try:
            if len(self.configAddresses) > 0:
                # signal the config reloader thread to stop, then wait for it
                self.__configUpdateEvent.set()
                self.__configUpdateFinished.wait()
            # signal the background monitoring thread to stop, then wait for it
            self.__bgMonitorEvent.set()
            self.__bgMonitorFinished.wait()

            if self.__udpSocket != None:
                self.logger.log(Logger.DEBUG, "Closing UDP socket on ApMon object destroy.");
                self.__udpSocket.close();
                self.__udpSocket = None;
            # mark as freed so __del__ doesn't run this again
            self.__freed = True
        except Exception, msg:
            self.logger.log(Logger.ERROR, "Error in free: "+str(msg), True)
+
+ #########################################################################################
+ # Internal functions - Config reloader thread
+ #########################################################################################
+
    def __configLoader(self):
        """
        Main loop of the thread that checks for changes and reloads the configuration
        """
        try:
            # loop until free() sets the update event
            while not self.__configUpdateEvent.isSet():
                self.__configUpdateEvent.wait(min(30, self.configRecheckInterval)) # don't recheck more often than 30 sec
                if self.__configUpdateEvent.isSet():
                    # free() was called - stop this thread
                    break
                if self.configRecheck:
                    try:
                        self.__reloadAddresses()
                        self.logger.log(Logger.DEBUG, "Config reloaded. Seleeping for "+`self.configRecheckInterval`+" sec.");
                    except Exception, msg:
                        self.logger.log(Logger.ERROR, "Error reloading config: "+str(msg), True)
            # let free() know this thread has finished
            self.__configUpdateFinished.set();
        except:
            # catch-all: never let this daemon thread die with a traceback
            pass
+
+ def __reloadAddresses(self):
+ """
+ Refresh now the destinations hash, by loading data from all sources in configAddresses
+ """
+ newDestinations = {}
+ urls = copy.deepcopy(self.configAddresses)
+ while(len(urls) > 0 and len(newDestinations) == 0):
+ src = random.choice(urls)
+ urls.remove(src)
+ self.__initializeFromFile(src, newDestinations)
+ # avoid changing config in the middle of sending packets to previous destinations
+ self.__configUpdateLock.acquire()
+ self.destinations = newDestinations
+ self.__configUpdateLock.release()
+
    def __addDestination (self, aDestination, tempDestinations, options = __defaultOptions):
        """
        Add a destination to the list.

        aDestination is a string of the form "{hostname|ip}[:port] [passwd]" without quotes.
        If the port is not given, it will be used the default port (8884)
        If the password is missing, it will be considered an empty string
        """
        # normalize whitespace: tabs -> spaces, then collapse repeated spaces
        aDestination = aDestination.strip().replace('\t', ' ')
        while aDestination != aDestination.replace('  ', ' '):
            aDestination = aDestination.replace('  ', ' ')
        sepPort = aDestination.find (':')
        sepPasswd = aDestination.rfind (' ')
        if sepPort >= 0:
            # form "host:port[ passwd]"
            host = aDestination[0:sepPort].strip()
            if sepPasswd > sepPort + 1:
                port = aDestination[sepPort+1:sepPasswd].strip()
                passwd = aDestination[sepPasswd:].strip()
            else:
                port = aDestination[sepPort+1:].strip()
                passwd = ""
        else:
            # no port given - use the default one
            port = str(self.__defaultPort)
            if sepPasswd >= 0:
                host = aDestination[0:sepPasswd].strip()
                passwd = aDestination[sepPasswd:].strip()
            else:
                host = aDestination.strip()
                passwd = ""
        if (not port.isdigit()):
            self.logger.log(Logger.WARNING, "Bad value for port number "+`port`+" in "+aDestination+" destination");
            return
        alreadyAdded = False
        port = int(port)
        try:
            host = socket.gethostbyname(host) # convert hostnames to IP addresses to avoid suffocating DNSs
        except socket.error, msg:
            self.logger.log(Logger.ERROR, "Error resolving "+host+": "+str(msg))
            return
        # check whether this host:port pair is already known
        for h, p, w in tempDestinations.keys():
            if (h == host) and (p == port):
                alreadyAdded = True
                break
        destination = (host, port, passwd)
        if not alreadyAdded:
            self.logger.log(Logger.INFO, "Adding destination "+host+':'+`port`+' '+passwd)
            if(self.destinations.has_key(destination)):
                tempDestinations[destination] = self.destinations[destination] # reuse previous options
            else:
                tempDestinations[destination] = copy.deepcopy(self.__defaultOptions) # have a different set of options for each dest
            if not self.destPrevData.has_key(destination):
                self.destPrevData[destination] = {} # set it empty only if it's really new
            if not self.senderRef.has_key(destination):
                self.senderRef[destination] = copy.deepcopy(self.__defaultSenderRef) # otherwise, don't reset this nr.
            if options != self.__defaultOptions:
                # we have to overwrite defaults with given options
                for key, value in options.items():
                    self.logger.log(Logger.DEBUG, "Overwritting option: "+key+" = "+`value`)
                    tempDestinations[destination][key] = value
        else:
            self.logger.log(Logger.NOTICE, "Destination "+host+":"+str(port)+" "+passwd+" already added. Skipping it");
+
    def __initializeFromFile (self, confFileName, tempDestinations):
        """
        Load destinations from confFileName file. If it's an URL (starts with "http://")
        load configuration from there. Put all destinations in tempDestinations hash.

        Calls addDestination for each line that doesn't start with # and
        has non-whitespace characters on it
        """
        try:
            if confFileName.find ("http://") == 0:
                confFile = self.__getURL(confFileName)
                if confFile is None:
                    return
            else:
                confFile = open (confFileName)
        except IOError, ex:
            self.logger.log(Logger.ERROR, "Cannot open "+confFileName);
            self.logger.log(Logger.ERROR, "IOError: "+str(ex));
            return
        self.logger.log(Logger.INFO, "Adding destinations from "+confFileName);
        dests = []  # destination lines; processed after the whole file is read
        opts = {}   # xApMon_* options that apply to all destinations in this file
        while(True):
            line = confFile.readline();
            if line == '':
                break;  # end of file
            line = line.strip()
            self.logger.log(Logger.DEBUG, "Reading line "+line);
            if (len(line) == 0) or (line[0] == '#'):
                # skip empty lines and comments
                continue
            elif line.startswith("xApMon_"):
                # option line of the form: xApMon_<param> = <value>
                m = re.match("xApMon_(.*)", line);
                if m != None:
                    m = re.match("(\S+)\s*=\s*(\S+)", m.group(1));
                    if m != None:
                        param = m.group(1); value = m.group(2);
                        # convert ON/OFF to booleans, and *_interval values to ints
                        if(value.upper() == "ON"):
                            value = True;
                        elif(value.upper() == "OFF"):
                            value = False;
                        elif(param.endswith("_interval")):
                            value = int(value);
                        if param == "loglevel":
                            self.logger.setLogLevel(value);
                        elif param == "maxMsgRate":
                            self.setMaxMsgRate(int(value));
                        elif param == "conf_recheck":
                            self.configRecheck = value;
                        elif param == "recheck_interval":
                            self.configRecheckInterval = value;
                        elif param.endswith("_data_sent"):
                            pass; # don't reset time in sys/job/general/_data_sent
                        else:
                            opts[param] = value;
            else:
                # any other non-empty line is a destination specification
                dests.append(line);

        confFile.close ()
        for line in dests:
            self.__addDestination(line, tempDestinations, opts)
+
+ ###############################################################################################
+ # Internal functions - Background monitor thread
+ ###############################################################################################
+
+ def __bgMonitor (self):
+ try:
+ while not self.__bgMonitorEvent.isSet():
+ self.__bgMonitorEvent.wait(10)
+ if self.__bgMonitorEvent.isSet():
+ break
+ if self.performBgMonitoring:
+ self.sendBgMonitoring() # send only if the interval has elapsed
+ self.__bgMonitorFinished.set()
+ except: #catch-all
+ pass
+
+ ###############################################################################################
+ # Internal helper functions
+ ###############################################################################################
+
    # this is a simplified replacement for urllib2 which doesn't support setting a timeout.
    # by default, if timeout is not specified, it waits 5 seconds
    def __getURL (self, url, timeout = 5):
        """
        Fetch the given http:// URL and return a StringIO positioned at the
        response body on HTTP 200; return None on any error (malformed URL,
        connection failure, or non-200 status).
        """
        r = re.compile("http://([^:/]+)(:(\d+))?(/.*)").match(url)
        if r is None:
            self.logger.log(Logger.ERROR, "Cannot open "+url+". Incorrectly formed URL.")
            return None
        host = r.group(1)
        if r.group(3) == None:
            port = 80 # no port is given, pick the default 80 for HTTP
        else:
            port = int(r.group(3))
        if r.group(4) == None:
            path = "" # no path is give, let server decide
        else:
            path = r.group(4)
        sock = None
        err = None
        try:
            # try each address (IPv4/IPv6) returned for this host until one connects
            for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                try:
                    sock = socket.socket(af, socktype, proto)
                except socket.error, msg:
                    sock = None
                    err = msg
                    continue
                try:
                    # settimeout may be missing on old Pythons; fall back to
                    # setsockopt with a packed timeval structure in that case
                    if hasattr(sock, 'settimeout'):
                        self.logger.log(Logger.DEBUG, "Setting socket timeout with settimeout.")
                        sock.settimeout(timeout);
                    else:
                        self.logger.log(Logger.DEBUG, "Setting socket timeout with setsockopt.")
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDTIMEO, struct.pack("ii", timeout, 0))
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, struct.pack("ii", timeout, 0))
                    sock.connect(sa)
                except socket.error, msg:
                    sock.close()
                    sock = None
                    err = msg
                    continue
                break   # connected successfully
        except socket.error, msg:
            sock = None
            err = msg
        if sock is None:
            self.logger.log(Logger.ERROR, "Cannot open "+url)
            self.logger.log(Logger.ERROR, "SocketError: "+str(err))
            return None
        try:
            sock.send("GET "+path+" HTTP/1.0\n\n");
            # read the entire response into memory
            data = ""
            done = False
            while not done:
                moreData = sock.recv(4096)
                data += moreData
                done = len(moreData) == 0
            sock.close()
            file = StringIO.StringIO(data)
            httpStatus = 0
            # scan the headers for the HTTP status line
            while True:
                line = file.readline().strip()
                if line == "":
                    break # exit at the end of file or at the first empty line (finish of http headers)
                r = re.compile("HTTP/\d.\d (\d+)").match(line)
                if r != None:
                    httpStatus = int(r.group(1))
            if httpStatus == 200:
                return file  # now positioned just past the headers, at the body
            else:
                self.logger.log(Logger.ERROR, "Cannot open "+url)
                if httpStatus == 401:
                    self.logger.log(Logger.ERROR, 'HTTPError: not authorized ['+str(httpStatus)+']')
                elif httpStatus == 404:
                    self.logger.log(Logger.ERROR, 'HTTPError: not found ['+str(httpStatus)+']')
                elif httpStatus == 503:
                    self.logger.log(Logger.ERROR, 'HTTPError: service unavailable ['+str(httpStatus)+']')
                else:
                    self.logger.log(Logger.ERROR, 'HTTPError: unknown error ['+str(httpStatus)+']')
                return None
        except socket.error, msg:
            self.logger.log(Logger.ERROR, "Cannot open "+url)
            self.logger.log(Logger.ERROR, "SocketError: "+str(msg))
            sock.close()
            return None
+
+ def __directSendParams (self, destination, clusterName, nodeName, timeStamp, params):
+
+ if destination == None:
+ self.logger.log(Logger.WARNING, "Destination is None");
+ return;
+
+ host, port, passwd = destination
+ crtSenderRef = self.senderRef[destination]
+
+ hdrPacker = xdrlib.Packer ()
+ hdrPacker.pack_string ("v:"+self.__version+"p:"+passwd)
+ hdrPacker.pack_int (crtSenderRef['INSTANCE_ID'])
+ hdrBuffer1 = hdrPacker.get_buffer()
+ hdrPacker.reset()
+
+ hdrPacker.pack_string (clusterName)
+ hdrPacker.pack_string (nodeName)
+ hdrBuffer2 = hdrPacker.get_buffer()
+ hdrPacker.reset()
+
+ hdrSize = len(hdrBuffer1) + len(hdrBuffer2) + 4
+
+ paramPacker = xdrlib.Packer ()
+ paramBlocks = []
+
+ crtParamsCount = 0
+ crtParamsBuffer = ''
+ crtParamsBuffSize = 0
+
+ map = None
+ if type(params) == type( {} ):
+ map = params.iteritems()
+ elif type(params) == type( [] ):
+ map = params
+ if map is not None:
+ for name, value in map:
+ if self.__packParameter(paramPacker, name, value):
+ buf = paramPacker.get_buffer()
+ bufLen = len(buf)
+ paramPacker.reset()
+ if bufLen + crtParamsBuffSize + hdrSize + 8 <= self.maxMsgSize: # 8 for 2 ints: params count and result time
+ crtParamsBuffer += buf
+ crtParamsBuffSize += bufLen
+ crtParamsCount += 1
+ else:
+ self.logger.log(Logger.NOTICE, "Message is getting too big (Max size="+str(self.maxMsgSize)+"). Splitting it at "+name+"="+str(value))
+ paramBlocks.append((crtParamsCount, crtParamsBuffer))
+ paramPacker.reset()
+ crtParamsCount = 1
+ crtParamsBuffer = buf
+ crtParamsBuffSize = bufLen
+ else:
+ self.logger.log(Logger.WARNING, "Unsupported params type in sendParameters: " + str(type(params)))
+
+ paramBlocks.append((crtParamsCount, crtParamsBuffer)) # update last params block
+ paramPacker.reset()
+
+ paramsTime = ''
+ if (timeStamp != None) and (timeStamp > 0):
+ paramPacker.pack_int(timeStamp)
+ paramsTime = paramsPacker.get_buffer()
+ paramPacker.reset()
+
+ for paramsCount, paramsBuffer in paramBlocks:
+ if self.__shouldSend() == False:
+ self.logger.log(Logger.WARNING, "Dropping packet since rate is too fast!");
+ continue;
+ paramPacker.pack_int(paramsCount)
+ crtSenderRef['SEQ_NR'] = (crtSenderRef['SEQ_NR'] + 1) % 2000000000; # wrap around 2 mld
+ hdrPacker.pack_int (crtSenderRef['SEQ_NR'])
+ buffer = hdrBuffer1 + hdrPacker.get_buffer() + hdrBuffer2 + paramPacker.get_buffer() + paramsBuffer + paramsTime
+ hdrPacker.reset()
+ paramPacker.reset()
+ bufLen = len(buffer)
+ self.logger.log(Logger.NOTICE, "Building XDR packet ["+str(clusterName)+"/"+str(nodeName)+"] <"+str(crtSenderRef['SEQ_NR'])+"/"+str(crtSenderRef['INSTANCE_ID'])+"> "+str(paramsCount)+" params, "+str(bufLen)+" bytes.");
+ if bufLen > self.maxMsgSize:
+ self.logger.log(Logger.WARNING, "Couldn't split parameter set (name/value pairs might be too large?). Message length is: "+str(bufLen)+". It might be dropped if > 1500. Sending anyway.")
+ # send this buffer to the destination, using udp datagrams
+ try:
+ self.__udpSocket.sendto(buffer, (host, port))
+ self.logger.log(Logger.NOTICE, "Packet sent to "+host+":"+str(port)+" "+passwd)
+ except socket.error, msg:
+ self.logger.log(Logger.ERROR, "Cannot send packet to "+host+":"+str(port)+" "+passwd+": "+str(msg[1]))
+
+ def __packParameter(self, xdrPacker, name, value):
+ if (name is None) or (name is ""):
+ self.logger.log(Logger.WARNING, "Undefined parameter name. Ignoring value "+str(value))
+ return False
+ if (value is None):
+ self.logger.log(Logger.WARNING, "Ignore " + str(name)+ " parameter because of None value")
+ return False
+ try:
+ typeValue = self.__valueTypes[type(value)]
+ xdrPacker.pack_string (name)
+ xdrPacker.pack_int (typeValue)
+ self.__packFunctions[typeValue] (xdrPacker, value)
+ self.logger.log(Logger.NOTICE, "Adding parameter "+str(name)+" = "+str(value))
+ return True
+ except Exception, ex:
+ self.logger.log(Logger.WARNING, "Error packing %s = %s; got %s" % (name, str(value), ex))
+ return False
+
+ # Destructor
+ def __del__(self):
+ if not self.__freed:
+ self.free();
+
    # Decide if the current datagram should be sent.
    # This decision is based on the number of messages previously sent:
    # an exponentially-weighted average of the per-second send count is kept,
    # and packets are randomly dropped as it approaches maxMsgRate.
    def __shouldSend(self):
        now = long(time.time());
        if now != self.__crtTime :
            # new time
            # update previous counters;
            self.__prvSent = self.__hWeight * self.__prvSent + (1.0 - self.__hWeight) * self.__crtSent / (now - self.__crtTime);
            self.__prvTime = self.__crtTime;
            self.logger.log(Logger.DEBUG, "previously sent: " + str(self.__crtSent) + "; dropped: " + str(self.__crtDrop));
            # reset current counter
            self.__crtTime = now;
            self.__crtSent = 0;
            self.__crtDrop = 0;

        # compute the history-weighted estimate of the current send rate
        valSent = self.__prvSent * self.__hWeight + self.__crtSent * (1 - self.__hWeight);

        doSend = True;

        # when we should start dropping messages
        level = self.maxMsgRate - self.maxMsgRate / 10;

        if valSent > (self.maxMsgRate - level) :
            # drop with a probability that grows as valSent approaches maxMsgRate
            if random.randint(0,self.maxMsgRate / 10) >= (self.maxMsgRate - valSent):
                doSend = False;

        # counting sent and dropped messages
        if doSend:
            self.__crtSent+=1;
        else:
            self.__crtDrop+=1;

        return doSend;
+
+ # Try to generate a more random instance id. It takes the process ID and
+ # combines it with the last digit from the IP addess and a random number
+ def __getInstanceID(self):
+ pid = os.getpid()
+ ip = random.randint(0, 255)
+ try:
+ sip = socket.gethostbyname(socket.gethostname())
+ ip = int(sip[1+sip.rfind('.'):])
+ except socket.error:
+ pass
+ rnd = random.randint(0, 255)
+ return (pid << 16) | (ip << 8) | rnd
+
+
+ ################################################################################################
+ # Private variables. Don't touch
+ ################################################################################################
+
    # Mapping from python value types to the XDR type codes used on the wire
    __valueTypes = {
        type("string"): 0, # XDR_STRING (see ApMon.h from C/C++ ApMon version)
        type(1): 2, # XDR_INT32
        type(1L): 5, # send longs as doubles
        type(1.0): 5}; # XDR_REAL64

    # Packer method used for each XDR type code above
    __packFunctions = {
        0: xdrlib.Packer.pack_string,
        2: xdrlib.Packer.pack_int,
        5: xdrlib.Packer.pack_double }

    # Default UDP port of the MonALISA service
    __defaultPort = 8884
    __version = "2.2.13-py" # apMon version number
+
--- /dev/null
+#!/usr/bin/python
+
+#
+# P2P lightweigth monitoring client - sends data to MonALISA service
+#
+
+import apmon
+import time
+import random
+import sys
+
+CONFIG_FILE = "./MLservice.conf"
+
+def usage():
+ print "Usage: "
+
def main():
    """Read node/param/value from argv and push them to the MonALISA service."""
    if len(sys.argv) < 4:
        usage()
        sys.exit(2)

    node, paramName = sys.argv[1], sys.argv[2]
    paramValue = float(sys.argv[3])

    # the service host is read from the configuration file
    monitor = apmon.ApMon(CONFIG_FILE)
    monitor.sendParameter("P2P-Next", node, paramName, paramValue)
    monitor.free()
+
+
# run the client only when executed as a script, not when imported
if __name__ == "__main__":
    main()

# vim:set ts=4:
# vim:expandtab:
# vim:set sw=4:
--- /dev/null
#!/bin/bash

# Start a single BitTorrent client run in the background; the per-client
# run.bash (in $RUN_DIR/$CLIENT_NAME) does the actual work.

if test $# -ne 7; then
	echo "Usage: $0 client_name metafile kill|no_kill enable_monitor port upload_speed download_speed"
	echo -e "\tclient_name is:"
	echo -e "\t\tazureus"
	echo -e "\t\tmainline"
	echo -e "\t\ttribler"
	echo -e "\t\ttransmission"
	echo -e "\t\thrktorrent"
	echo -e "\t\taria"
	exit 1
fi

BASE_DIR=/home/p2p/p2p-svn-repo/trunk/bt-prof/

# defines RUN_DIR, among others
source $BASE_DIR/config.bash

CLIENT_NAME=$1
METAFILE=$2
KILL_AFTER_DLD=$3
MON_ENABLE=$4
PORT=$5
UPLOAD_SPEED=$6
DOWNLOAD_SPEED=$7

if ! test -f "$METAFILE"; then
	echo "Error: Meta file does not exist: $METAFILE"
	exit 1
fi

echo "Running $CLIENT_NAME"
cd $RUN_DIR/$CLIENT_NAME
# bug fix: the kill flag was expanded as ${KILL_AFTER_DL} (undefined variable),
# so the client script always received an empty kill|no_kill argument
nohup bash run.bash ${BASE_DIR}/config.bash "$METAFILE" "${KILL_AFTER_DLD}" "${MON_ENABLE}" $PORT $UPLOAD_SPEED $DOWNLOAD_SPEED &> /dev/null &

exit 0
--- /dev/null
+#!/bin/bash
+
+# Start the BitTornado tracker unless something already listens on its port.
+
+TRACKER_PORT=6969
+TRACKER_PATH=/home/razvan/projects/BitTornado-CVS/bttrack.py
+TRACKER_LOG_FILE=./tracker.log
+
+# tracker may already be started: probe the listening TCP sockets first and
+# only launch a new instance when the port is free
+if ! netstat --tcp --listening --numeric | grep ":$TRACKER_PORT" &> /dev/null; then
+ nohup $TRACKER_PATH --port $TRACKER_PORT --dfile $TRACKER_LOG_FILE &> /dev/null &
+fi
--- /dev/null
+#!/bin/bash
+
+ADDRESS_LIST="10.38.7.26 10.38.8.193 10.38.7.22 10.38.7.29 10.38.8.62 10.38.7.31"
+
+for i in ${ADDRESS_LIST}; do
+ echo $i;
+ iptables -t filter -A INPUT -s $i -j DROP
+ iptables -t filter -A OUTPUT -d $i -j DROP
+done
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+source config.bash
+
+for i in $(seq 0 5); do
+ ssh ${USERNAME}@${ADDRESS_ARRAY[$i]} "bash ${CANCEL_SCRIPT}"
+done
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+declare -a CLIENT_ARRAY
+CLIENT_ARRAY=("transmission" "mainline" "hrktorrent" "tribler" "azureus")
+
+declare -a CLIENT_ARRAY_SIZE
+CLIENT_ARRAY_SIZE=5
+
+declare -a ADDRESS_ARRAY
+ADDRESS_ARRAY=("141.85.37.26" "141.85.37.26" "141.85.37.26" "141.85.37.26" "141.85.37.26")
+PORT_ARRAY=("50122" "50222" "50322" "50422" "50522")
+#ADDRESS_ARRAY=("10.38.7.26" "10.38.8.193" "10.38.7.22" "10.38.7.29" "10.38.8.62" "10.38.7.31")
+#ADDRESS_ARRAY=("141.85.37.241" "141.85.37.242" "141.85.37.243" "141.85.37.244" "141.85.37.245" "141.85.37.246")
+
+USERNAME=p2p
+RUN_SCRIPT=/home/p2p/p2p-svn-repo/trunk/bt-prof/run.bash
+CLEAN_SCRIPT=/home/p2p/p2p-svn-repo/trunk/bt-prof/clean.bash
+CANCEL_SCRIPT=/home/p2p/p2p-svn-repo/trunk/bt-prof/cancel.bash
+METADIR=/home/p2p/p2p-svn-repo/trunk/bt-prof/meta/
+#METAFILE=ubuntu-8.10-desktop-i386.iso.torrent
+METAFILE=tt.torrent
+KILL_AFTER_DL="no_kill"
+#ubuntu_7.10_dvd_i386.torrent
+#ebooks.torrent
+#dark_knight.torrent
+#prison_break_4x01.torrent
+#dexter.s2.torrent
+#tropic_thunder.torrrent
+#Fedora-8-dvd-i386.torrent
+#kubuntu-7.04-dvd-amd64.iso.torrent
+#kubuntu-7.04-dvd-i386.iso.torrent
+#kubuntu-8.04-dvd-i386.iso.torrent
+#mandriva-linux-2008.0-free-dvd-i586.torrent
+#opensuse-11.torrent
+#speed-racer.torrent
+#ubuntu-8.10-desktop-i386.iso.torrent
+METAPATH=${METADIR}/${METAFILE}
--- /dev/null
+#!/bin/bash
+
+# Clean every client machine, then start its BitTorrent client on the
+# configured metafile (arrays and paths come from config.bash).
+
+source config.bash
+
+for i in $(seq 0 $(($CLIENT_ARRAY_SIZE - 1))); do
+ ssh ${USERNAME}@${ADDRESS_ARRAY[$i]} -p ${PORT_ARRAY[$i]} "bash ${CLEAN_SCRIPT}"
+ # nohup + trailing & inside the remote command so the ssh session can
+ # return while the client keeps running on the remote host
+ ssh ${USERNAME}@${ADDRESS_ARRAY[$i]} -p ${PORT_ARRAY[$i]} "nohup bash ${RUN_SCRIPT} ${CLIENT_ARRAY[$i]} ${METAPATH} ${KILL_AFTER_DL} &> /dev/null &"
+# ssh ${USERNAME}@${ADDRESS_ARRAY[$i]} "touch bla"
+ echo "$USERNAME ${ADDRESS_ARRAY[$i]} ${RUN_SCRIPT} ${CLIENT_ARRAY[$i]} ${METAPATH}"
+done
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+# Invoke the cancel script in the SVN checkout on the control host.
+
+REMOTE_USER=p2p
+REMOTE_HOST=p2p-next-01.grid.pub.ro
+REMOTE_PORT=10122
+SVN_CHECKOUT_DIR=/home/p2p/p2p-svn-repo
+CANCEL_SCRIPT=trunk/bt-prof/cancel.bash
+
+ssh -p "$REMOTE_PORT" "$REMOTE_USER@$REMOTE_HOST" \
+ "bash $SVN_CHECKOUT_DIR/$CANCEL_SCRIPT"
--- /dev/null
+#!/bin/bash
+
+# Cancel the running clients on the control host, then clean up their
+# download/state directories.
+
+REMOTE_USER=p2p
+REMOTE_HOST=p2p-next-01.grid.pub.ro
+REMOTE_PORT=10122
+SVN_CHECKOUT_DIR=/home/p2p/p2p-svn-repo
+CANCEL_SCRIPT=trunk/bt-prof/cancel.bash
+# NOTE(review): path says cleanup.bash — verify the script exists under
+# trunk/bt-prof with this exact name
+CLEANUP_SCRIPT=trunk/bt-prof/cleanup.bash
+
+# stop the clients first so cleanup does not race with active downloads
+ssh -l $REMOTE_USER $REMOTE_HOST -p $REMOTE_PORT \
+ "bash $SVN_CHECKOUT_DIR/$CANCEL_SCRIPT"
+ssh -l $REMOTE_USER $REMOTE_HOST -p $REMOTE_PORT \
+ "bash $SVN_CHECKOUT_DIR/$CLEANUP_SCRIPT"
--- /dev/null
+#!/bin/bash
+
+# Invoke the listing script in the SVN checkout on the control host.
+
+REMOTE_USER=p2p
+REMOTE_HOST=p2p-next-01.grid.pub.ro
+REMOTE_PORT=10122
+SVN_CHECKOUT_DIR=/home/p2p/p2p-svn-repo
+LIST_SCRIPT=trunk/bt-prof/list.bash
+
+ssh -p "$REMOTE_PORT" "$REMOTE_USER@$REMOTE_HOST" \
+ "bash $SVN_CHECKOUT_DIR/$LIST_SCRIPT"
--- /dev/null
+#!/bin/bash
+
+# Start one BitTorrent client on the control host via bt-prof/run.bash.
+
+REMOTE_USER=p2p
+REMOTE_HOST=p2p-next-01.grid.pub.ro
+REMOTE_PORT=10122
+SVN_CHECKOUT_DIR=/home/p2p/p2p-svn-repo
+RUN_SCRIPT=trunk/bt-prof/run.bash
+METAFILE_DIR=/home/p2p/p2p-meta
+
+# test arguments
+# arguments are
+# - client name (#1)
+# - metafile (#2)
+# - kill after download (kill|no_kill) (#3)
+# - enable monitoring (yes|no) (#4)
+# - BitTorrent client upload port (#5)
+# - upload speed (KB/s) (#6)
+# - download speed (KB/s) (#7)
+
+CLIENT_NAME=hrktorrent
+METAFILE=$METAFILE_DIR/fedora-11-i386-dvd.torrent
+# fix: run.bash documents the accepted values as kill|no_kill (was "no")
+KILL_AFTER_DLD=no_kill
+MON_ENABLE=no
+PORT=50100
+UPLOAD_SPEED=64
+DOWNLOAD_SPEED=512
+
+ssh -l $REMOTE_USER $REMOTE_HOST -p $REMOTE_PORT \
+ "bash $SVN_CHECKOUT_DIR/$RUN_SCRIPT $CLIENT_NAME $METAFILE $KILL_AFTER_DLD $MON_ENABLE $PORT $UPLOAD_SPEED $DOWNLOAD_SPEED"
--- /dev/null
+#!/bin/bash
+
+# Install the build/runtime dependencies of the BitTorrent clients under test.
+
+PACKAGES="build-essential python intltool python-twisted python-apsw \
+sun-java6-jdk libgnutls-dev libgpg-error-dev libgcrypt-dev libares-dev \
+libxml2-dev libssl-dev pkg-config libcurl4-openssl-dev automake1.10 \
+autoconf python-m2crypto python-wxgtk2.8 python-pysqlite2 libboost-dev \
+libboost-date-time-dev libboost-filesystem-dev libboost-thread-dev \
+libboost-iostreams-dev libboost-program-options-dev libexpat1-dev \
+dialog libncurses-dev"
+
+apt-get install -y $PACKAGES
--- /dev/null
+p2p-next-01-01 256 512
+p2p-next-01-02 256 512
+p2p-next-01-03 256 512
+p2p-next-01-04 32 64
+p2p-next-01-05 32 64
+p2p-next-02-01 32 64
+p2p-next-02-02 32 64
+p2p-next-02-03 32 64
+p2p-next-02-04 256 512
+p2p-next-02-05 256 512
+p2p-next-03-01 256 512
+p2p-next-03-02 256 512
+p2p-next-03-03 256 512
+p2p-next-03-04 32 64
+p2p-next-03-05 32 64
+p2p-next-04-01 32 64
+p2p-next-04-02 32 64
+p2p-next-04-03 32 64
+p2p-next-04-04 256 512
+p2p-next-04-05 256 512
+p2p-next-06-01 256 512
+p2p-next-06-02 256 512
+p2p-next-06-03 256 512
+p2p-next-06-04 32 64
+p2p-next-06-05 32 64
+p2p-next-07-01 32 64
+p2p-next-07-02 32 64
+p2p-next-07-03 32 64
+p2p-next-07-04 256 512
+p2p-next-07-05 256 512
+p2p-next-08-01 256 512
+p2p-next-08-02 256 512
+p2p-next-08-03 256 512
+p2p-next-08-04 32 64
+p2p-next-08-05 32 64
+p2p-next-09-01 32 64
+p2p-next-09-02 32 64
+p2p-next-09-03 32 64
+p2p-next-09-04 256 512
+p2p-next-09-05 256 512
--- /dev/null
+p2p-next-01.grid.pub.ro 10522 p2p
+p2p-next-02.grid.pub.ro 10122 p2p
+p2p-next-02.grid.pub.ro 10222 p2p
+p2p-next-02.grid.pub.ro 10322 p2p
+p2p-next-02.grid.pub.ro 10422 p2p
+p2p-next-02.grid.pub.ro 10522 p2p
+p2p-next-03.grid.pub.ro 10122 p2p
+p2p-next-03.grid.pub.ro 10222 p2p
+p2p-next-03.grid.pub.ro 10322 p2p
+p2p-next-03.grid.pub.ro 10422 p2p
+p2p-next-03.grid.pub.ro 10522 p2p
+p2p-next-04.grid.pub.ro 10122 p2p
+p2p-next-04.grid.pub.ro 10222 p2p
+p2p-next-04.grid.pub.ro 10322 p2p
+p2p-next-04.grid.pub.ro 10422 p2p
+p2p-next-04.grid.pub.ro 10522 p2p
+p2p-next-06.grid.pub.ro 10122 p2p
+p2p-next-06.grid.pub.ro 10222 p2p
+p2p-next-06.grid.pub.ro 10322 p2p
+p2p-next-06.grid.pub.ro 10422 p2p
+p2p-next-06.grid.pub.ro 10522 p2p
+p2p-next-07.grid.pub.ro 10122 p2p
+p2p-next-07.grid.pub.ro 10222 p2p
+p2p-next-07.grid.pub.ro 10322 p2p
+p2p-next-07.grid.pub.ro 10422 p2p
+p2p-next-07.grid.pub.ro 10522 p2p
+p2p-next-08.grid.pub.ro 10122 p2p
+p2p-next-08.grid.pub.ro 10222 p2p
+p2p-next-08.grid.pub.ro 10322 p2p
+p2p-next-08.grid.pub.ro 10422 p2p
+p2p-next-08.grid.pub.ro 10522 p2p
+p2p-next-09.grid.pub.ro 10122 p2p
+p2p-next-09.grid.pub.ro 10222 p2p
+p2p-next-09.grid.pub.ro 10322 p2p
+p2p-next-09.grid.pub.ro 10422 p2p
+p2p-next-09.grid.pub.ro 10522 p2p
--- /dev/null
+p2p-next-01.grid.pub.ro 10122 p2p
+p2p-next-01.grid.pub.ro 10222 p2p
+p2p-next-01.grid.pub.ro 10322 p2p
+p2p-next-01.grid.pub.ro 10422 p2p
+p2p-next-01.grid.pub.ro 10522 p2p
+p2p-next-02.grid.pub.ro 10122 p2p
+p2p-next-02.grid.pub.ro 10222 p2p
+p2p-next-02.grid.pub.ro 10322 p2p
+p2p-next-02.grid.pub.ro 10422 p2p
+p2p-next-02.grid.pub.ro 10522 p2p
+p2p-next-03.grid.pub.ro 10122 p2p
+p2p-next-03.grid.pub.ro 10222 p2p
+p2p-next-03.grid.pub.ro 10322 p2p
+p2p-next-03.grid.pub.ro 10422 p2p
+p2p-next-03.grid.pub.ro 10522 p2p
+p2p-next-04.grid.pub.ro 10122 p2p
+p2p-next-04.grid.pub.ro 10222 p2p
+p2p-next-04.grid.pub.ro 10322 p2p
+p2p-next-04.grid.pub.ro 10422 p2p
+p2p-next-04.grid.pub.ro 10522 p2p
+p2p-next-06.grid.pub.ro 10122 p2p
+p2p-next-06.grid.pub.ro 10222 p2p
+p2p-next-06.grid.pub.ro 10322 p2p
+p2p-next-06.grid.pub.ro 10422 p2p
+p2p-next-06.grid.pub.ro 10522 p2p
+p2p-next-07.grid.pub.ro 10122 p2p
+p2p-next-07.grid.pub.ro 10222 p2p
+p2p-next-07.grid.pub.ro 10322 p2p
+p2p-next-07.grid.pub.ro 10422 p2p
+p2p-next-07.grid.pub.ro 10522 p2p
+p2p-next-08.grid.pub.ro 10122 p2p
+p2p-next-08.grid.pub.ro 10222 p2p
+p2p-next-08.grid.pub.ro 10322 p2p
+p2p-next-08.grid.pub.ro 10422 p2p
+p2p-next-08.grid.pub.ro 10522 p2p
+p2p-next-09.grid.pub.ro 10122 p2p
+p2p-next-09.grid.pub.ro 10222 p2p
+p2p-next-09.grid.pub.ro 10322 p2p
+p2p-next-09.grid.pub.ro 10422 p2p
+p2p-next-09.grid.pub.ro 10522 p2p
--- /dev/null
+p2p-next-06.grid.pub.ro 10122 p2p
+p2p-next-06.grid.pub.ro 10222 p2p
+p2p-next-06.grid.pub.ro 10322 p2p
+p2p-next-06.grid.pub.ro 10422 p2p
+p2p-next-06.grid.pub.ro 10522 p2p
+p2p-next-07.grid.pub.ro 10122 p2p
+p2p-next-07.grid.pub.ro 10222 p2p
+p2p-next-07.grid.pub.ro 10322 p2p
+p2p-next-07.grid.pub.ro 10422 p2p
+p2p-next-07.grid.pub.ro 10522 p2p
+p2p-next-08.grid.pub.ro 10122 p2p
+p2p-next-08.grid.pub.ro 10222 p2p
+p2p-next-08.grid.pub.ro 10322 p2p
+p2p-next-08.grid.pub.ro 10422 p2p
+p2p-next-08.grid.pub.ro 10522 p2p
+p2p-next-09.grid.pub.ro 10122 p2p
+p2p-next-09.grid.pub.ro 10222 p2p
+p2p-next-09.grid.pub.ro 10322 p2p
+p2p-next-09.grid.pub.ro 10422 p2p
+p2p-next-09.grid.pub.ro 10522 p2p
--- /dev/null
+p2p-next-01.grid.pub.ro 10222 p2p
+p2p-next-01.grid.pub.ro 10322 p2p
+p2p-next-01.grid.pub.ro 10422 p2p
+p2p-next-01.grid.pub.ro 10522 p2p
+p2p-next-02.grid.pub.ro 10122 p2p
+p2p-next-02.grid.pub.ro 10222 p2p
+p2p-next-02.grid.pub.ro 10322 p2p
+p2p-next-02.grid.pub.ro 10422 p2p
+p2p-next-02.grid.pub.ro 10522 p2p
+p2p-next-03.grid.pub.ro 10122 p2p
+p2p-next-03.grid.pub.ro 10222 p2p
+p2p-next-03.grid.pub.ro 10322 p2p
+p2p-next-03.grid.pub.ro 10422 p2p
+p2p-next-03.grid.pub.ro 10522 p2p
+p2p-next-04.grid.pub.ro 10122 p2p
+p2p-next-04.grid.pub.ro 10222 p2p
+p2p-next-04.grid.pub.ro 10322 p2p
+p2p-next-04.grid.pub.ro 10422 p2p
+p2p-next-04.grid.pub.ro 10522 p2p
+p2p-next-06.grid.pub.ro 10122 p2p
+p2p-next-06.grid.pub.ro 10222 p2p
+p2p-next-06.grid.pub.ro 10322 p2p
+p2p-next-06.grid.pub.ro 10422 p2p
+p2p-next-06.grid.pub.ro 10522 p2p
+p2p-next-07.grid.pub.ro 10122 p2p
+p2p-next-07.grid.pub.ro 10222 p2p
+p2p-next-07.grid.pub.ro 10322 p2p
+p2p-next-07.grid.pub.ro 10422 p2p
+p2p-next-07.grid.pub.ro 10522 p2p
+p2p-next-08.grid.pub.ro 10122 p2p
+p2p-next-08.grid.pub.ro 10222 p2p
+p2p-next-08.grid.pub.ro 10322 p2p
+p2p-next-08.grid.pub.ro 10422 p2p
+p2p-next-08.grid.pub.ro 10522 p2p
+p2p-next-09.grid.pub.ro 10122 p2p
+p2p-next-09.grid.pub.ro 10222 p2p
+p2p-next-09.grid.pub.ro 10322 p2p
+p2p-next-09.grid.pub.ro 10422 p2p
+p2p-next-09.grid.pub.ro 10522 p2p
--- /dev/null
+p2p-next-01.grid.pub.ro 10122 p2p
--- /dev/null
+#!/bin/bash
+
+CONFIG=/home/p2p/p2p-svn-repo/trunk/michel/client_config.txt
+TORRENT=/home/p2p/p2p-svn-repo/trunk/michel/file.torrent
+
+hname=$(hostname)
+index=${hname/p2p-next-[0-9][0-9]-/}
+PORT=5${index}00
+
+read name up down <<< $(grep $(hostname) $CONFIG)
+
+cd /home/p2p/p2p-svn-repo/trunk/bt-prof/
+
+ping -c 3 141.85.224.15 &> /dev/null
+ping -c 3 anaconda.cs.pub.ro &> /dev/null
+
+exec bash run.bash hrktorrent $TORRENT no_kill yes $PORT $up $down
+
+exit 0
--- /dev/null
+#!/bin/bash
+
+# Fan-out launcher: start local_run on every machine listed in
+# client_info.txt (columns: host, ssh port, user).
+# NOTE(review): cut -f uses TAB as its default delimiter, so this assumes
+# client_info.txt is tab-separated — verify; space-separated lines would
+# make every array hold whole lines.
+
+hosts=($(cut -f 1 < ./client_info.txt | tr '\n' ' '))
+ports=($(cut -f 2 < ./client_info.txt | tr '\n' ' '))
+users=($(cut -f 3 < ./client_info.txt | tr '\n' ' '))
+nlines=$(wc -l < ./client_info.txt)
+
+for i in $(seq 0 $(($nlines-1)) ); do
+ echo "ssh -l ${users[$i]} ${hosts[$i]} -p ${ports[$i]} \"nohup bash /home/p2p/p2p-svn-repo/trunk/michel/local_run &> /dev/null &\" "
+ # note: the actual command logs to /home/p2p/nohup.out, unlike the echoed preview
+ ssh -l ${users[$i]} ${hosts[$i]} -p ${ports[$i]} "nohup bash /home/p2p/p2p-svn-repo/trunk/michel/local_run &> /home/p2p/nohup.out &"
+done
+
+exit 0
--- /dev/null
+diff --git a/Makefile b/Makefile
+index 20c89a9..677bd48 100644
+--- a/Makefile
++++ b/Makefile
+@@ -9,7 +9,7 @@ $(OUT): $(OBJ)
+ $(CXX) $(LDFLAGS) $(OBJ) $(LIBS) -o $(OUT)
+
+ clean:
+- rm -rf $(OBJ) $(OUT)
++ -rm -rf $(OBJ) $(OUT) *~
+
+ install: all
+ @install hrktorrent ${PREFIX}/bin
+@@ -19,6 +19,6 @@ install: all
+ @install -m 644 hrktorrent.1 ${MANPREFIX}/man1/hrktorrent.1
+
+ uninstall:
+- @rm -f ${PREFIX}/bin/hrktorrent
+- @rm -f ${MANPREFIX}/man1/hrktorrent.1
+- @rm -r ${PREFIX}/share/examples/hrktorrent/
++ -@rm -f ${PREFIX}/bin/hrktorrent
++ -@rm -f ${MANPREFIX}/man1/hrktorrent.1
++ -@rm -r ${PREFIX}/share/examples/hrktorrent/
+diff --git a/core.cpp b/core.cpp
+index 5b30d20..8f51f52 100644
+--- a/core.cpp
++++ b/core.cpp
+@@ -55,13 +55,10 @@ CCore::StatusLoop(void* data)
+ if (stdout_is_tty) {\r
+ if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0) {\r
+ char errbuf[128];\r
+- std::string errstring;\r
+ \r
+ strerror_r(errno, errbuf, 128);\r
+- errstring = "ioctl: ";\r
+- errstring.append((const char *) errbuf);\r
++ std::cerr << "ioctl error: " << errbuf << std::endl;\r
+ \r
+- Core->VerbosePrint("Core", errstring);\r
+ exit(EXIT_FAILURE);\r
+ }\r
+ columns = ws.ws_col;\r
+@@ -113,15 +110,6 @@ CCore::StatusLoop(void* data)
+ output << eta << "s";\r
+ s_output = output.str();\r
+ \r
+- if(s_output.length() > columns) {\r
+- s_output.resize(columns - 3);\r
+- s_output.append("..");\r
+- }\r
+- else if(s_output.length() < columns) {\r
+- for(int i = 0; i < s_output.length() - columns; i++)\r
+- s_output.append(" ");\r
+- }\r
+-\r
+ if (stdout_is_tty) {\r
+ if(s_output.length() > columns) {\r
+ s_output.resize(columns - 3);\r
+@@ -199,6 +187,31 @@ CCore::saveDHT()
+ nodefile.close();\r
+ }\r
+ \r
++static void\r
++SignalHandler(int signo)\r
++{\r
++ Core->VerbosePrint("Core", "Received signal.");\r
++}\r
++\r
++void\r
++CCore::ScheduleSignal(int signo)\r
++{\r
++ struct sigaction sa;\r
++\r
++ memset(&sa, 0, sizeof(sa));\r
++ sa.sa_handler = SignalHandler;\r
++ sa.sa_flags = SA_RESETHAND;\r
++\r
++ if (sigaction(signo, &sa, NULL) < 0) {\r
++ char errbuf[128];\r
++\r
++ strerror_r(errno, errbuf, 128);\r
++\r
++ std::cerr << "sigaction error: " << errbuf << std::endl;\r
++ exit(EXIT_FAILURE);\r
++ }\r
++}\r
++\r
+ int\r
+ CCore::Run()\r
+ {\r
+@@ -261,12 +274,23 @@ CCore::Run()
+ _session->set_ip_filter(IPFilter->getFilter());\r
+ }\r
+ \r
+- std::cout << "\"Return\" shuts hrktorrent down.\n" << std::endl;\r
++ ScheduleSignal(SIGINT);\r
++ std::cout << "\"CTRL-C\" shuts hrktorrent down.\n" << std::endl;\r
++\r
+ pthread_create(&statusthread, NULL, StatusLoop, NULL);\r
+ \r
++ /*\r
++ * reading stdin does not work with output redirection or running the\r
++ * program in background\r
++ */\r
++ /*\r
+ char input;\r
+ std::cin.unsetf(std::ios_base::skipws);\r
+ std::cin >> input;\r
++ */\r
++\r
++ /* wait for signal */\r
++ pause();\r
+ \r
+ if(Settings->GetI("dht") > 0) {\r
+ saveDHT();\r
+diff --git a/core.h b/core.h
+index f48782a..ada28eb 100644
+--- a/core.h
++++ b/core.h
+@@ -20,6 +20,7 @@ class CCore
+
+ void loadDHT();
+ void saveDHT();
++ void ScheduleSignal(int signo);
+
+ int _argc;
+ char** _argv;
+diff --git a/settings.cpp b/settings.cpp
+index 19b5187..f799f04 100644
+--- a/settings.cpp
++++ b/settings.cpp
+@@ -38,7 +38,11 @@ CSettings::LoadConfig()
+ \r
+ std::ifstream config(path.c_str(), std::ifstream::in);\r
+ if(!config.is_open()) {\r
+- Core->VerbosePrint("Settings", "Could not load config file. Will use default values.");\r
++ /* workaround: Core->VerbosePrint uses non-initialized Core */\r
++ std::cout << "[Settings] Could not load config file. Will use default values." << std::endl;\r
++\r
++ /* cannot do: Core may not be properly initialized */\r
++ /* Core->VerbosePrint("Settings", "Could not load config file. Will use default values.");*/\r
+ return;\r
+ }\r
+ \r
--- /dev/null
+diff --git a/Makefile b/Makefile
+index 20c89a9..677bd48 100644
+--- a/Makefile
++++ b/Makefile
+@@ -9,7 +9,7 @@ $(OUT): $(OBJ)
+ $(CXX) $(LDFLAGS) $(OBJ) $(LIBS) -o $(OUT)
+
+ clean:
+- rm -rf $(OBJ) $(OUT)
++ -rm -rf $(OBJ) $(OUT) *~
+
+ install: all
+ @install hrktorrent ${PREFIX}/bin
+@@ -19,6 +19,6 @@ install: all
+ @install -m 644 hrktorrent.1 ${MANPREFIX}/man1/hrktorrent.1
+
+ uninstall:
+- @rm -f ${PREFIX}/bin/hrktorrent
+- @rm -f ${MANPREFIX}/man1/hrktorrent.1
+- @rm -r ${PREFIX}/share/examples/hrktorrent/
++ -@rm -f ${PREFIX}/bin/hrktorrent
++ -@rm -f ${MANPREFIX}/man1/hrktorrent.1
++ -@rm -r ${PREFIX}/share/examples/hrktorrent/
+diff --git a/core.cpp b/core.cpp
+index 5b30d20..8f51f52 100644
+--- a/core.cpp
++++ b/core.cpp
+@@ -55,13 +55,10 @@ CCore::StatusLoop(void* data)
+ if (stdout_is_tty) {\r
+ if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0) {\r
+ char errbuf[128];\r
+- std::string errstring;\r
+ \r
+ strerror_r(errno, errbuf, 128);\r
+- errstring = "ioctl: ";\r
+- errstring.append((const char *) errbuf);\r
++ std::cerr << "ioctl error: " << errbuf << std::endl;\r
+ \r
+- Core->VerbosePrint("Core", errstring);\r
+ exit(EXIT_FAILURE);\r
+ }\r
+ columns = ws.ws_col;\r
+@@ -113,15 +110,6 @@ CCore::StatusLoop(void* data)
+ output << eta << "s";\r
+ s_output = output.str();\r
+ \r
+- if(s_output.length() > columns) {\r
+- s_output.resize(columns - 3);\r
+- s_output.append("..");\r
+- }\r
+- else if(s_output.length() < columns) {\r
+- for(int i = 0; i < s_output.length() - columns; i++)\r
+- s_output.append(" ");\r
+- }\r
+-\r
+ if (stdout_is_tty) {\r
+ if(s_output.length() > columns) {\r
+ s_output.resize(columns - 3);\r
+@@ -199,6 +187,31 @@ CCore::saveDHT()
+ nodefile.close();\r
+ }\r
+ \r
++static void\r
++SignalHandler(int signo)\r
++{\r
++ Core->VerbosePrint("Core", "Received signal.");\r
++}\r
++\r
++void\r
++CCore::ScheduleSignal(int signo)\r
++{\r
++ struct sigaction sa;\r
++\r
++ memset(&sa, 0, sizeof(sa));\r
++ sa.sa_handler = SignalHandler;\r
++ sa.sa_flags = SA_RESETHAND;\r
++\r
++ if (sigaction(signo, &sa, NULL) < 0) {\r
++ char errbuf[128];\r
++\r
++ strerror_r(errno, errbuf, 128);\r
++\r
++ std::cerr << "sigaction error: " << errbuf << std::endl;\r
++ exit(EXIT_FAILURE);\r
++ }\r
++}\r
++\r
+ int\r
+ CCore::Run()\r
+ {\r
+@@ -261,12 +274,23 @@ CCore::Run()
+ _session->set_ip_filter(IPFilter->getFilter());\r
+ }\r
+ \r
+- std::cout << "\"Return\" shuts hrktorrent down.\n" << std::endl;\r
++ ScheduleSignal(SIGINT);\r
++ std::cout << "\"CTRL-C\" shuts hrktorrent down.\n" << std::endl;\r
++\r
+ pthread_create(&statusthread, NULL, StatusLoop, NULL);\r
+ \r
++ /*\r
++ * reading stdin does not work with output redirection or running the\r
++ * program in background\r
++ */\r
++ /*\r
+ char input;\r
+ std::cin.unsetf(std::ios_base::skipws);\r
+ std::cin >> input;\r
++ */\r
++\r
++ /* wait for signal */\r
++ pause();\r
+ \r
+ if(Settings->GetI("dht") > 0) {\r
+ saveDHT();\r
+diff --git a/core.h b/core.h
+index f48782a..ada28eb 100644
+--- a/core.h
++++ b/core.h
+@@ -20,6 +20,7 @@ class CCore
+
+ void loadDHT();
+ void saveDHT();
++ void ScheduleSignal(int signo);
+
+ int _argc;
+ char** _argv;
+diff --git a/settings.cpp b/settings.cpp
+index 19b5187..f799f04 100644
+--- a/settings.cpp
++++ b/settings.cpp
+@@ -38,7 +38,11 @@ CSettings::LoadConfig()
+ \r
+ std::ifstream config(path.c_str(), std::ifstream::in);\r
+ if(!config.is_open()) {\r
+- Core->VerbosePrint("Settings", "Could not load config file. Will use default values.");\r
++ /* workaround: Core->VerbosePrint uses non-initialized Core */\r
++ std::cout << "[Settings] Could not load config file. Will use default values." << std::endl;\r
++\r
++ /* cannot do: Core may not be properly initialized */\r
++ /* Core->VerbosePrint("Settings", "Could not load config file. Will use default values.");*/\r
+ return;\r
+ }\r
+ \r
+diff --git a/vars.mk b/vars.mk
+index e79e4e5..aca9a50 100644
+--- a/vars.mk
++++ b/vars.mk
+@@ -3,6 +3,5 @@ MANPREFIX = ${PREFIX}/man
+ CXX? = g++
+ OBJ = main.o core.o settings.o utils.o ipfilter.o
+ OUT = hrktorrent
+-CXXFLAGS += `pkg-config --cflags libtorrent`
+-LIBS = `pkg-config --libs libtorrent` -lpthread
+-
++CXXFLAGS += -DTORRENT_USE_OPENSSL -pthread -I/usr/local/include -I/usr/local/include/libtorrent -DTORRENT_VERBOSE_LOGGING
++LIBS = -lpthread -L/usr/local/lib -ltorrent-rasterbar
--- /dev/null
+diff --git a/Makefile b/Makefile
+index 20c89a9..677bd48 100644
+--- a/Makefile
++++ b/Makefile
+@@ -9,7 +9,7 @@ $(OUT): $(OBJ)
+ $(CXX) $(LDFLAGS) $(OBJ) $(LIBS) -o $(OUT)
+
+ clean:
+- rm -rf $(OBJ) $(OUT)
++ -rm -rf $(OBJ) $(OUT) *~
+
+ install: all
+ @install hrktorrent ${PREFIX}/bin
+@@ -19,6 +19,6 @@ install: all
+ @install -m 644 hrktorrent.1 ${MANPREFIX}/man1/hrktorrent.1
+
+ uninstall:
+- @rm -f ${PREFIX}/bin/hrktorrent
+- @rm -f ${MANPREFIX}/man1/hrktorrent.1
+- @rm -r ${PREFIX}/share/examples/hrktorrent/
++ -@rm -f ${PREFIX}/bin/hrktorrent
++ -@rm -f ${MANPREFIX}/man1/hrktorrent.1
++ -@rm -r ${PREFIX}/share/examples/hrktorrent/
--- /dev/null
+diff --git a/core.cpp b/core.cpp
+index 5b30d20..8f51f52 100644
+--- a/core.cpp
++++ b/core.cpp
+@@ -55,13 +55,10 @@ CCore::StatusLoop(void* data)
+ if (stdout_is_tty) {\r
+ if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0) {\r
+ char errbuf[128];\r
+- std::string errstring;\r
+ \r
+ strerror_r(errno, errbuf, 128);\r
+- errstring = "ioctl: ";\r
+- errstring.append((const char *) errbuf);\r
++ std::cerr << "ioctl error: " << errbuf << std::endl;\r
+ \r
+- Core->VerbosePrint("Core", errstring);\r
+ exit(EXIT_FAILURE);\r
+ }\r
+ columns = ws.ws_col;\r
+@@ -113,15 +110,6 @@ CCore::StatusLoop(void* data)
+ output << eta << "s";\r
+ s_output = output.str();\r
+ \r
+- if(s_output.length() > columns) {\r
+- s_output.resize(columns - 3);\r
+- s_output.append("..");\r
+- }\r
+- else if(s_output.length() < columns) {\r
+- for(int i = 0; i < s_output.length() - columns; i++)\r
+- s_output.append(" ");\r
+- }\r
+-\r
+ if (stdout_is_tty) {\r
+ if(s_output.length() > columns) {\r
+ s_output.resize(columns - 3);\r
+@@ -199,6 +187,31 @@ CCore::saveDHT()
+ nodefile.close();\r
+ }\r
+ \r
++static void\r
++SignalHandler(int signo)\r
++{\r
++ Core->VerbosePrint("Core", "Received signal.");\r
++}\r
++\r
++void\r
++CCore::ScheduleSignal(int signo)\r
++{\r
++ struct sigaction sa;\r
++\r
++ memset(&sa, 0, sizeof(sa));\r
++ sa.sa_handler = SignalHandler;\r
++ sa.sa_flags = SA_RESETHAND;\r
++\r
++ if (sigaction(signo, &sa, NULL) < 0) {\r
++ char errbuf[128];\r
++\r
++ strerror_r(errno, errbuf, 128);\r
++\r
++ std::cerr << "sigaction error: " << errbuf << std::endl;\r
++ exit(EXIT_FAILURE);\r
++ }\r
++}\r
++\r
+ int\r
+ CCore::Run()\r
+ {\r
+@@ -261,12 +274,23 @@ CCore::Run()
+ _session->set_ip_filter(IPFilter->getFilter());\r
+ }\r
+ \r
+- std::cout << "\"Return\" shuts hrktorrent down.\n" << std::endl;\r
++ ScheduleSignal(SIGINT);\r
++ std::cout << "\"CTRL-C\" shuts hrktorrent down.\n" << std::endl;\r
++\r
+ pthread_create(&statusthread, NULL, StatusLoop, NULL);\r
+ \r
++ /*\r
++ * reading stdin does not work with output redirection or running the\r
++ * program in background\r
++ */\r
++ /*\r
+ char input;\r
+ std::cin.unsetf(std::ios_base::skipws);\r
+ std::cin >> input;\r
++ */\r
++\r
++ /* wait for signal */\r
++ pause();\r
+ \r
+ if(Settings->GetI("dht") > 0) {\r
+ saveDHT();\r
+diff --git a/core.h b/core.h
+index f48782a..ada28eb 100644
+--- a/core.h
++++ b/core.h
+@@ -20,6 +20,7 @@ class CCore
+
+ void loadDHT();
+ void saveDHT();
++ void ScheduleSignal(int signo);
+
+ int _argc;
+ char** _argv;
--- /dev/null
+diff --git a/settings.cpp b/settings.cpp
+index 19b5187..f799f04 100644
+--- a/settings.cpp
++++ b/settings.cpp
+@@ -38,7 +38,11 @@ CSettings::LoadConfig()
+ \r
+ std::ifstream config(path.c_str(), std::ifstream::in);\r
+ if(!config.is_open()) {\r
+- Core->VerbosePrint("Settings", "Could not load config file. Will use default values.");\r
++ /* workaround: Core->VerbosePrint uses non-initialized Core */\r
++ std::cout << "[Settings] Could not load config file. Will use default values." << std::endl;\r
++\r
++ /* cannot do: Core may not be properly initialized */\r
++ /* Core->VerbosePrint("Settings", "Could not load config file. Will use default values.");*/\r
+ return;\r
+ }\r
+ \r
--- /dev/null
+diff --git a/vars.mk b/vars.mk
+index e79e4e5..aca9a50 100644
+--- a/vars.mk
++++ b/vars.mk
+@@ -3,6 +3,5 @@ MANPREFIX = ${PREFIX}/man
+ CXX? = g++
+ OBJ = main.o core.o settings.o utils.o ipfilter.o
+ OUT = hrktorrent
+-CXXFLAGS += `pkg-config --cflags libtorrent`
+-LIBS = `pkg-config --libs libtorrent` -lpthread
+-
++CXXFLAGS += -DTORRENT_USE_OPENSSL -pthread -I/usr/local/include -I/usr/local/include/libtorrent -DTORRENT_VERBOSE_LOGGING
++LIBS = -lpthread -L/usr/local/lib -ltorrent-rasterbar